/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
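
/*
 * Illustrative numbers only (not taken from any limit table below): with
 * refclk = 96000 kHz, m = 100, n = 4 and p = 10, the *_calc_dpll_params()
 * helpers further down (the pnv/vlv variants, which divide by n directly)
 * would compute vco = 96000 * 100 / 4 = 2400000 kHz and a pipe clock of
 * dot = 2400000 / 10 = 240000 kHz.
 */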

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
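
/*
 * Worked example for the divider math above, with made-up values: for
 * ref_freq = 1600000 kHz and divider = 7 the result is
 * DIV_ROUND_CLOSEST(1600000 << 1, 7 + 1) = 2 * 1600000 / 8 = 400000 kHz.
 */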

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
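
/*
 * Example with invented register values: m1 = 12 and m2 = 8 give
 * m = 5 * (12 + 2) + (8 + 2) = 80; with refclk = 96000 kHz, n = 2,
 * p1 = 2 and p2 = 10 this works out to vco = 96000 * 80 / (2 + 2) =
 * 1920000 kHz and dot = 1920000 / (2 * 10) = 96000 kHz.
 */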

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
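
/*
 * Note that on CHV m2 carries 22 fractional bits (see the "<< 22" limits
 * in intel_limits_chv), which is why chv_calc_dpll_params() divides by
 * n << 22 rather than n: the shift cancels the fixed-point scaling.
 */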

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
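
/*
 * Hypothetical usage sketch (not an actual call site in this file):
 * for a 96000 kHz target dot clock with a 96000 kHz refclk a caller
 * would do roughly
 *
 *	struct dpll clock;
 *
 *	if (i9xx_find_best_dpll(limit, crtc_state, 96000, 96000,
 *				NULL, &clock))
 *		program clock.m1/m2/n/p1/p2 into the DPLL registers;
 */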

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns true if it is, and
 * stores the calculated error in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
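
/*
 * Example of the ppm math above, with invented numbers: a target of
 * 1000000 kHz and a computed dot clock of 999950 kHz give an error of
 * 1000000 * 50 / 1000000 = 50 ppm; being under 100 ppm, a candidate
 * with a bigger P value would still be preferred over it.
 */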

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1, m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, n is always set to 1 and m1 is always
	 * set to 2. If we need to support a 200 MHz refclk we will have
	 * to revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
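
/*
 * BXT reuses the CHV PLL structure, so bxt_find_best_dpll() below just
 * feeds the fixed 100 MHz reference clock and the bxt limit table into
 * chv_find_best_dpll().
 */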

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
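
/*
 * Note: 5 ms is orders of magnitude longer than a single scanline at
 * any plausible refresh rate, so two PIPEDSL reads that far apart
 * should practically always differ while the pipe is running.
 */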

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}

static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1829
1830 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1831 {
1832 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1833 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1834 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1835 enum pipe pipe = crtc->pipe;
1836 i915_reg_t reg;
1837 u32 val;
1838
1839 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1840
1841 /*
1842 * Make sure planes won't keep trying to pump pixels to us,
1843 * or we might hang the display.
1844 */
1845 assert_planes_disabled(crtc);
1846
1847 trace_intel_pipe_disable(dev_priv, pipe);
1848
1849 reg = PIPECONF(cpu_transcoder);
1850 val = I915_READ(reg);
1851 if ((val & PIPECONF_ENABLE) == 0)
1852 return;
1853
1854 /*
1855 * Double wide has implications for planes
1856 * so best keep it disabled when not needed.
1857 */
1858 if (old_crtc_state->double_wide)
1859 val &= ~PIPECONF_DOUBLE_WIDE;
1860
1861 /* Don't disable the pipe or its PLLs while they're still needed (i830) */
1862 if (!IS_I830(dev_priv))
1863 val &= ~PIPECONF_ENABLE;
1864
1865 I915_WRITE(reg, val);
1866 if ((val & PIPECONF_ENABLE) == 0)
1867 intel_wait_for_pipe_off(old_crtc_state);
1868 }
1869
1870 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1871 {
1872 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1873 }
1874
1875 static unsigned int
1876 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1877 {
1878 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1879 unsigned int cpp = fb->format->cpp[color_plane];
1880
1881 switch (fb->modifier) {
1882 case DRM_FORMAT_MOD_LINEAR:
1883 return cpp;
1884 case I915_FORMAT_MOD_X_TILED:
1885 if (IS_GEN(dev_priv, 2))
1886 return 128;
1887 else
1888 return 512;
1889 case I915_FORMAT_MOD_Y_TILED_CCS:
1890 if (color_plane == 1)
1891 return 128;
1892 /* fall through */
1893 case I915_FORMAT_MOD_Y_TILED:
1894 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1895 return 128;
1896 else
1897 return 512;
1898 case I915_FORMAT_MOD_Yf_TILED_CCS:
1899 if (color_plane == 1)
1900 return 128;
1901 /* fall through */
1902 case I915_FORMAT_MOD_Yf_TILED:
1903 switch (cpp) {
1904 case 1:
1905 return 64;
1906 case 2:
1907 case 4:
1908 return 128;
1909 case 8:
1910 case 16:
1911 return 256;
1912 default:
1913 MISSING_CASE(cpp);
1914 return cpp;
1915 }
1916 break;
1917 default:
1918 MISSING_CASE(fb->modifier);
1919 return cpp;
1920 }
1921 }
1922
1923 static unsigned int
1924 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1925 {
1926 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1927 return 1;
1928 else
1929 return intel_tile_size(to_i915(fb->dev)) /
1930 intel_tile_width_bytes(fb, color_plane);
1931 }
1932
1933 /* Return the tile dimensions in pixel units */
1934 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1935 unsigned int *tile_width,
1936 unsigned int *tile_height)
1937 {
1938 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1939 unsigned int cpp = fb->format->cpp[color_plane];
1940
1941 *tile_width = tile_width_bytes / cpp;
1942 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1943 }
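/*
 * Editor's note, worked example derived from the tables above: an
 * X-tiled gen4+ fb with cpp = 4 has tile_width_bytes = 512, giving
 * *tile_width = 512 / 4 = 128 pixels and *tile_height = 4096 / 512 = 8
 * rows; on gen2 (2 KiB tiles, 128-byte tile rows) the same fb gets
 * 32x16 pixel tiles.
 */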
1944
1945 unsigned int
1946 intel_fb_align_height(const struct drm_framebuffer *fb,
1947 int color_plane, unsigned int height)
1948 {
1949 unsigned int tile_height = intel_tile_height(fb, color_plane);
1950
1951 return ALIGN(height, tile_height);
1952 }
1953
1954 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1955 {
1956 unsigned int size = 0;
1957 int i;
1958
1959 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1960 size += rot_info->plane[i].width * rot_info->plane[i].height;
1961
1962 return size;
1963 }
1964
1965 static void
1966 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1967 const struct drm_framebuffer *fb,
1968 unsigned int rotation)
1969 {
1970 view->type = I915_GGTT_VIEW_NORMAL;
1971 if (drm_rotation_90_or_270(rotation)) {
1972 view->type = I915_GGTT_VIEW_ROTATED;
1973 view->rotated = to_intel_framebuffer(fb)->rot_info;
1974 }
1975 }
1976
1977 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
1978 {
1979 if (IS_I830(dev_priv))
1980 return 16 * 1024;
1981 else if (IS_I85X(dev_priv))
1982 return 256;
1983 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1984 return 32;
1985 else
1986 return 4 * 1024;
1987 }
1988
1989 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1990 {
1991 if (INTEL_GEN(dev_priv) >= 9)
1992 return 256 * 1024;
1993 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1994 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1995 return 128 * 1024;
1996 else if (INTEL_GEN(dev_priv) >= 4)
1997 return 4 * 1024;
1998 else
1999 return 0;
2000 }
2001
2002 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2003 int color_plane)
2004 {
2005 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2006
2007 /* AUX_DIST needs only 4K alignment */
2008 if (color_plane == 1)
2009 return 4096;
2010
2011 switch (fb->modifier) {
2012 case DRM_FORMAT_MOD_LINEAR:
2013 return intel_linear_alignment(dev_priv);
2014 case I915_FORMAT_MOD_X_TILED:
2015 if (INTEL_GEN(dev_priv) >= 9)
2016 return 256 * 1024;
2017 return 0;
2018 case I915_FORMAT_MOD_Y_TILED_CCS:
2019 case I915_FORMAT_MOD_Yf_TILED_CCS:
2020 case I915_FORMAT_MOD_Y_TILED:
2021 case I915_FORMAT_MOD_Yf_TILED:
2022 return 1 * 1024 * 1024;
2023 default:
2024 MISSING_CASE(fb->modifier);
2025 return 0;
2026 }
2027 }
2028
2029 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2030 {
2031 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2032 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2033
2034 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2035 }
2036
2037 struct i915_vma *
2038 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2039 const struct i915_ggtt_view *view,
2040 bool uses_fence,
2041 unsigned long *out_flags)
2042 {
2043 struct drm_device *dev = fb->dev;
2044 struct drm_i915_private *dev_priv = to_i915(dev);
2045 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2046 intel_wakeref_t wakeref;
2047 struct i915_vma *vma;
2048 unsigned int pinctl;
2049 u32 alignment;
2050
2051 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2052
2053 alignment = intel_surf_alignment(fb, 0);
2054
2055 /* Note that the w/a also requires 64 PTE of padding following the
2056 * bo. We currently fill all unused PTE with the shadow page and so
2057 * we should always have valid PTE following the scanout preventing
2058 * the VT-d warning.
2059 */
2060 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2061 alignment = 256 * 1024;
2062
2063 /*
2064 * Global gtt pte registers are special registers which actually forward
2065 * writes to a chunk of system memory. Which means that there is no risk
2066 * that the register values disappear as soon as we call
2067 * intel_runtime_pm_put(), so it is correct to wrap only the
2068 * pin/unpin/fence and not more.
2069 */
2070 wakeref = intel_runtime_pm_get(dev_priv);
2071
2072 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2073
2074 pinctl = 0;
2075
2076 /* Valleyview is definitely limited to scanning out the first
2077 * 512MiB. Let's presume this behaviour was inherited from the
2078 * g4x display engine and that all earlier gens are similarly
2079 * limited. Testing suggests that it is a little more
2080 * complicated than this. For example, Cherryview appears quite
2081 * happy to scan out from anywhere within its global aperture.
2082 */
2083 if (HAS_GMCH(dev_priv))
2084 pinctl |= PIN_MAPPABLE;
2085
2086 vma = i915_gem_object_pin_to_display_plane(obj,
2087 alignment, view, pinctl);
2088 if (IS_ERR(vma))
2089 goto err;
2090
2091 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2092 int ret;
2093
2094 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2095 * fence, whereas 965+ only requires a fence if using
2096 * framebuffer compression. For simplicity, we always install a
2097 * fence when possible, as the cost is not that onerous.
2098 *
2099 * If we fail to fence the tiled scanout, then either the
2100 * modeset will reject the change (which is highly unlikely as
2101 * the affected systems, all but one, do not have unmappable
2102 * space) or we will not be able to enable full powersaving
2103 * techniques (also likely not to apply due to various limits
2104 * FBC and the like impose on the size of the buffer, which
2105 * presumably we violated anyway with this unmappable buffer).
2106 * Anyway, it is presumably better to stumble onwards with
2107 * something and try to run the system in a "less than optimal"
2108 * mode that matches the user configuration.
2109 */
2110 ret = i915_vma_pin_fence(vma);
2111 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2112 i915_gem_object_unpin_from_display_plane(vma);
2113 vma = ERR_PTR(ret);
2114 goto err;
2115 }
2116
2117 if (ret == 0 && vma->fence)
2118 *out_flags |= PLANE_HAS_FENCE;
2119 }
2120
2121 i915_vma_get(vma);
2122 err:
2123 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2124
2125 intel_runtime_pm_put(dev_priv, wakeref);
2126 return vma;
2127 }
2128
2129 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2130 {
2131 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2132
2133 if (flags & PLANE_HAS_FENCE)
2134 i915_vma_unpin_fence(vma);
2135 i915_gem_object_unpin_from_display_plane(vma);
2136 i915_vma_put(vma);
2137 }
2138
2139 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2140 unsigned int rotation)
2141 {
2142 if (drm_rotation_90_or_270(rotation))
2143 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2144 else
2145 return fb->pitches[color_plane];
2146 }
2147
2148 /*
2149 * Convert the x/y offsets into a linear offset.
2150 * Only valid with 0/180 degree rotation, which is fine since linear
2151 * offset is only used with linear buffers on pre-hsw and tiled buffers
2152 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
2153 */
2154 u32 intel_fb_xy_to_linear(int x, int y,
2155 const struct intel_plane_state *state,
2156 int color_plane)
2157 {
2158 const struct drm_framebuffer *fb = state->base.fb;
2159 unsigned int cpp = fb->format->cpp[color_plane];
2160 unsigned int pitch = state->color_plane[color_plane].stride;
2161
2162 return y * pitch + x * cpp;
2163 }
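/*
 * Editor's note, worked example: with cpp = 4 and an 8192-byte stride,
 * (x, y) = (16, 2) maps to 2 * 8192 + 16 * 4 = 16448 bytes from the
 * start of the buffer.
 */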
2164
2165 /*
2166 * Add the x/y offsets derived from fb->offsets[] to the user
2167 * specified plane src x/y offsets. The resulting x/y offsets
2168 * specify the start of scanout from the beginning of the gtt mapping.
2169 */
2170 void intel_add_fb_offsets(int *x, int *y,
2171 const struct intel_plane_state *state,
2172 int color_plane)
2174 {
2175 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2176 unsigned int rotation = state->base.rotation;
2177
2178 if (drm_rotation_90_or_270(rotation)) {
2179 *x += intel_fb->rotated[color_plane].x;
2180 *y += intel_fb->rotated[color_plane].y;
2181 } else {
2182 *x += intel_fb->normal[color_plane].x;
2183 *y += intel_fb->normal[color_plane].y;
2184 }
2185 }
2186
2187 static u32 intel_adjust_tile_offset(int *x, int *y,
2188 unsigned int tile_width,
2189 unsigned int tile_height,
2190 unsigned int tile_size,
2191 unsigned int pitch_tiles,
2192 u32 old_offset,
2193 u32 new_offset)
2194 {
2195 unsigned int pitch_pixels = pitch_tiles * tile_width;
2196 unsigned int tiles;
2197
2198 WARN_ON(old_offset & (tile_size - 1));
2199 WARN_ON(new_offset & (tile_size - 1));
2200 WARN_ON(new_offset > old_offset);
2201
2202 tiles = (old_offset - new_offset) / tile_size;
2203
2204 *y += tiles / pitch_tiles * tile_height;
2205 *x += tiles % pitch_tiles * tile_width;
2206
2207 /* minimize x in case it got needlessly big */
2208 *y += *x / pitch_pixels * tile_height;
2209 *x %= pitch_pixels;
2210
2211 return new_offset;
2212 }
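/*
 * Editor's note, worked example (same X-tiled layout as the sketch
 * after intel_compute_aligned_offset() below: 128x8 pixel tiles of
 * 4 KiB, pitch_tiles = 8): moving from old_offset = 97 * 4096 to
 * new_offset = 0 turns the 97-tile difference into
 * *y += 97 / 8 * 8 = 96 and *x += 97 % 8 * 128 = 128, recovering
 * (200, 100) from the intra-tile (72, 4).
 */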
2213
2214 static bool is_surface_linear(u64 modifier, int color_plane)
2215 {
2216 return modifier == DRM_FORMAT_MOD_LINEAR;
2217 }
2218
2219 static u32 intel_adjust_aligned_offset(int *x, int *y,
2220 const struct drm_framebuffer *fb,
2221 int color_plane,
2222 unsigned int rotation,
2223 unsigned int pitch,
2224 u32 old_offset, u32 new_offset)
2225 {
2226 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2227 unsigned int cpp = fb->format->cpp[color_plane];
2228
2229 WARN_ON(new_offset > old_offset);
2230
2231 if (!is_surface_linear(fb->modifier, color_plane)) {
2232 unsigned int tile_size, tile_width, tile_height;
2233 unsigned int pitch_tiles;
2234
2235 tile_size = intel_tile_size(dev_priv);
2236 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2237
2238 if (drm_rotation_90_or_270(rotation)) {
2239 pitch_tiles = pitch / tile_height;
2240 swap(tile_width, tile_height);
2241 } else {
2242 pitch_tiles = pitch / (tile_width * cpp);
2243 }
2244
2245 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2246 tile_size, pitch_tiles,
2247 old_offset, new_offset);
2248 } else {
2249 old_offset += *y * pitch + *x * cpp;
2250
2251 *y = (old_offset - new_offset) / pitch;
2252 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2253 }
2254
2255 return new_offset;
2256 }
2257
2258 /*
2259 * Adjust the tile offset by moving the difference into
2260 * the x/y offsets.
2261 */
2262 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2263 const struct intel_plane_state *state,
2264 int color_plane,
2265 u32 old_offset, u32 new_offset)
2266 {
2267 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2268 state->base.rotation,
2269 state->color_plane[color_plane].stride,
2270 old_offset, new_offset);
2271 }
2272
2273 /*
2274 * Computes the aligned offset to the base tile and adjusts
2275 * x, y. bytes per pixel is assumed to be a power-of-two.
2276 *
2277 * In the 90/270 rotated case, x and y are assumed
2278 * to be already rotated to match the rotated GTT view, and
2279 * pitch is the tile_height aligned framebuffer height.
2280 *
2281 * This function is used when computing the derived information
2282 * under intel_framebuffer, so using any of that information
2283 * here is not allowed. Anything under drm_framebuffer can be
2284 * used. This is why the user has to pass in the pitch since it
2285 * is specified in the rotated orientation.
2286 */
2287 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2288 int *x, int *y,
2289 const struct drm_framebuffer *fb,
2290 int color_plane,
2291 unsigned int pitch,
2292 unsigned int rotation,
2293 u32 alignment)
2294 {
2295 unsigned int cpp = fb->format->cpp[color_plane];
2296 u32 offset, offset_aligned;
2297
2298 if (alignment)
2299 alignment--;
2300
2301 if (!is_surface_linear(fb->modifier, color_plane)) {
2302 unsigned int tile_size, tile_width, tile_height;
2303 unsigned int tile_rows, tiles, pitch_tiles;
2304
2305 tile_size = intel_tile_size(dev_priv);
2306 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2307
2308 if (drm_rotation_90_or_270(rotation)) {
2309 pitch_tiles = pitch / tile_height;
2310 swap(tile_width, tile_height);
2311 } else {
2312 pitch_tiles = pitch / (tile_width * cpp);
2313 }
2314
2315 tile_rows = *y / tile_height;
2316 *y %= tile_height;
2317
2318 tiles = *x / tile_width;
2319 *x %= tile_width;
2320
2321 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2322 offset_aligned = offset & ~alignment;
2323
2324 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2325 tile_size, pitch_tiles,
2326 offset, offset_aligned);
2327 } else {
2328 offset = *y * pitch + *x * cpp;
2329 offset_aligned = offset & ~alignment;
2330
2331 *y = (offset & alignment) / pitch;
2332 *x = ((offset & alignment) - *y * pitch) / cpp;
2333 }
2334
2335 return offset_aligned;
2336 }
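/*
 * Editor's sketch, illustrative only and not driver code: the tiled
 * branch above specialized to an X-tiled gen4+ surface with cpp = 4 and
 * alignment == tile_size, in which case offset_aligned == offset and
 * x/y are simply reduced to intra-tile coordinates. All names below are
 * made up for the example.
 */
#if 0
static u32 example_tile_offset(int *x, int *y, unsigned int pitch_bytes)
{
	const unsigned int tile_size = 4096;	/* gen4+ tile, in bytes */
	const unsigned int tile_width = 128;	/* 512 bytes / 4 cpp */
	const unsigned int tile_height = 8;	/* 4096 / 512 */
	unsigned int pitch_tiles = pitch_bytes / 512;
	unsigned int tile_rows = *y / tile_height;
	unsigned int tiles = *x / tile_width;

	*y %= tile_height;
	*x %= tile_width;

	/*
	 * e.g. pitch_bytes = 4096 (8 tiles), (x, y) = (200, 100):
	 * tile_rows = 12, tiles = 1 -> offset = (12 * 8 + 1) * 4096,
	 * with (x, y) reduced to (72, 4) inside that tile.
	 */
	return (tile_rows * pitch_tiles + tiles) * tile_size;
}
#endif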
2337
2338 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2339 const struct intel_plane_state *state,
2340 int color_plane)
2341 {
2342 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2343 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2344 const struct drm_framebuffer *fb = state->base.fb;
2345 unsigned int rotation = state->base.rotation;
2346 int pitch = state->color_plane[color_plane].stride;
2347 u32 alignment;
2348
2349 if (intel_plane->id == PLANE_CURSOR)
2350 alignment = intel_cursor_alignment(dev_priv);
2351 else
2352 alignment = intel_surf_alignment(fb, color_plane);
2353
2354 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2355 pitch, rotation, alignment);
2356 }
2357
2358 /* Convert the fb->offset[] into x/y offsets */
2359 static int intel_fb_offset_to_xy(int *x, int *y,
2360 const struct drm_framebuffer *fb,
2361 int color_plane)
2362 {
2363 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2364 unsigned int height;
2365
2366 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2367 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2368 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2369 fb->offsets[color_plane], color_plane);
2370 return -EINVAL;
2371 }
2372
2373 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2374 height = ALIGN(height, intel_tile_height(fb, color_plane));
2375
2376 /* Catch potential overflows early */
2377 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2378 fb->offsets[color_plane])) {
2379 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2380 fb->offsets[color_plane], fb->pitches[color_plane],
2381 color_plane);
2382 return -ERANGE;
2383 }
2384
2385 *x = 0;
2386 *y = 0;
2387
2388 intel_adjust_aligned_offset(x, y,
2389 fb, color_plane, DRM_MODE_ROTATE_0,
2390 fb->pitches[color_plane],
2391 fb->offsets[color_plane], 0);
2392
2393 return 0;
2394 }
2395
2396 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2397 {
2398 switch (fb_modifier) {
2399 case I915_FORMAT_MOD_X_TILED:
2400 return I915_TILING_X;
2401 case I915_FORMAT_MOD_Y_TILED:
2402 case I915_FORMAT_MOD_Y_TILED_CCS:
2403 return I915_TILING_Y;
2404 default:
2405 return I915_TILING_NONE;
2406 }
2407 }
2408
2409 /*
2410 * From the Sky Lake PRM:
2411 * "The Color Control Surface (CCS) contains the compression status of
2412 * the cache-line pairs. The compression state of the cache-line pair
2413 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2414 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2415 * cache-line-pairs. CCS is always Y tiled."
2416 *
2417 * Since cache-line pairs refer to horizontally adjacent cache lines,
2418 * each cache line in the CCS corresponds to an area of 32x16 cache
2419 * lines on the main surface. Since each pixel is 4 bytes, this gives
2420 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2421 * main surface.
2422 */
2423 static const struct drm_format_info ccs_formats[] = {
2424 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2425 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2426 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2427 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2428 };
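/*
 * Editor's note, worked example of the ratio above: for a 4096x4096
 * XRGB8888 main surface the CCS color plane is 4096 / 8 = 512 bytes
 * wide and 4096 / 16 = 256 rows tall (128 KiB), which is exactly what
 * .cpp = 1 with .hsub = 8 and .vsub = 16 describe.
 */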
2429
2430 static const struct drm_format_info *
2431 lookup_format_info(const struct drm_format_info formats[],
2432 int num_formats, u32 format)
2433 {
2434 int i;
2435
2436 for (i = 0; i < num_formats; i++) {
2437 if (formats[i].format == format)
2438 return &formats[i];
2439 }
2440
2441 return NULL;
2442 }
2443
2444 static const struct drm_format_info *
2445 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2446 {
2447 switch (cmd->modifier[0]) {
2448 case I915_FORMAT_MOD_Y_TILED_CCS:
2449 case I915_FORMAT_MOD_Yf_TILED_CCS:
2450 return lookup_format_info(ccs_formats,
2451 ARRAY_SIZE(ccs_formats),
2452 cmd->pixel_format);
2453 default:
2454 return NULL;
2455 }
2456 }
2457
2458 bool is_ccs_modifier(u64 modifier)
2459 {
2460 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2461 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2462 }
2463
2464 static int
2465 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2466 struct drm_framebuffer *fb)
2467 {
2468 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2469 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2470 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2471 u32 gtt_offset_rotated = 0;
2472 unsigned int max_size = 0;
2473 int i, num_planes = fb->format->num_planes;
2474 unsigned int tile_size = intel_tile_size(dev_priv);
2475
2476 for (i = 0; i < num_planes; i++) {
2477 unsigned int width, height;
2478 unsigned int cpp, size;
2479 u32 offset;
2480 int x, y;
2481 int ret;
2482
2483 cpp = fb->format->cpp[i];
2484 width = drm_framebuffer_plane_width(fb->width, fb, i);
2485 height = drm_framebuffer_plane_height(fb->height, fb, i);
2486
2487 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2488 if (ret) {
2489 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2490 i, fb->offsets[i]);
2491 return ret;
2492 }
2493
2494 if (is_ccs_modifier(fb->modifier) && i == 1) {
2495 int hsub = fb->format->hsub;
2496 int vsub = fb->format->vsub;
2497 int tile_width, tile_height;
2498 int main_x, main_y;
2499 int ccs_x, ccs_y;
2500
2501 intel_tile_dims(fb, i, &tile_width, &tile_height);
2502 tile_width *= hsub;
2503 tile_height *= vsub;
2504
2505 ccs_x = (x * hsub) % tile_width;
2506 ccs_y = (y * vsub) % tile_height;
2507 main_x = intel_fb->normal[0].x % tile_width;
2508 main_y = intel_fb->normal[0].y % tile_height;
2509
2510 /*
2511 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2512 * x/y offsets must match between CCS and the main surface.
2513 */
2514 if (main_x != ccs_x || main_y != ccs_y) {
2515 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2516 main_x, main_y,
2517 ccs_x, ccs_y,
2518 intel_fb->normal[0].x,
2519 intel_fb->normal[0].y,
2520 x, y);
2521 return -EINVAL;
2522 }
2523 }
2524
2525 /*
2526 * The fence (if used) is aligned to the start of the object
2527 * so having the framebuffer wrap around across the edge of the
2528 * fenced region doesn't really work. We have no API to configure
2529 * the fence start offset within the object (nor could we probably
2530 * on gen2/3). So it's easier if we just require that the
2531 * fb layout agrees with the fence layout. We already check that the
2532 * fb stride matches the fence stride elsewhere.
2533 */
2534 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2535 (x + width) * cpp > fb->pitches[i]) {
2536 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2537 i, fb->offsets[i]);
2538 return -EINVAL;
2539 }
2540
2541 /*
2542 * First pixel of the framebuffer from
2543 * the start of the normal gtt mapping.
2544 */
2545 intel_fb->normal[i].x = x;
2546 intel_fb->normal[i].y = y;
2547
2548 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2549 fb->pitches[i],
2550 DRM_MODE_ROTATE_0,
2551 tile_size);
2552 offset /= tile_size;
2553
2554 if (!is_surface_linear(fb->modifier, i)) {
2555 unsigned int tile_width, tile_height;
2556 unsigned int pitch_tiles;
2557 struct drm_rect r;
2558
2559 intel_tile_dims(fb, i, &tile_width, &tile_height);
2560
2561 rot_info->plane[i].offset = offset;
2562 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2563 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2564 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2565
2566 intel_fb->rotated[i].pitch =
2567 rot_info->plane[i].height * tile_height;
2568
2569 /* how many tiles does this plane need */
2570 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2571 /*
2572 * If the plane isn't horizontally tile aligned,
2573 * we need one more tile.
2574 */
2575 if (x != 0)
2576 size++;
2577
2578 /* rotate the x/y offsets to match the GTT view */
2579 r.x1 = x;
2580 r.y1 = y;
2581 r.x2 = x + width;
2582 r.y2 = y + height;
2583 drm_rect_rotate(&r,
2584 rot_info->plane[i].width * tile_width,
2585 rot_info->plane[i].height * tile_height,
2586 DRM_MODE_ROTATE_270);
2587 x = r.x1;
2588 y = r.y1;
2589
2590 /* rotate the tile dimensions to match the GTT view */
2591 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2592 swap(tile_width, tile_height);
2593
2594 /*
2595 * We only keep the x/y offsets, so push all of the
2596 * gtt offset into the x/y offsets.
2597 */
2598 intel_adjust_tile_offset(&x, &y,
2599 tile_width, tile_height,
2600 tile_size, pitch_tiles,
2601 gtt_offset_rotated * tile_size, 0);
2602
2603 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2604
2605 /*
2606 * First pixel of the framebuffer from
2607 * the start of the rotated gtt mapping.
2608 */
2609 intel_fb->rotated[i].x = x;
2610 intel_fb->rotated[i].y = y;
2611 } else {
2612 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2613 x * cpp, tile_size);
2614 }
2615
2616 /* total number of tiles needed in the bo */
2617 max_size = max(max_size, offset + size);
2618 }
2619
2620 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2621 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2622 mul_u32_u32(max_size, tile_size), obj->base.size);
2623 return -EINVAL;
2624 }
2625
2626 return 0;
2627 }
2628
2629 static int i9xx_format_to_fourcc(int format)
2630 {
2631 switch (format) {
2632 case DISPPLANE_8BPP:
2633 return DRM_FORMAT_C8;
2634 case DISPPLANE_BGRX555:
2635 return DRM_FORMAT_XRGB1555;
2636 case DISPPLANE_BGRX565:
2637 return DRM_FORMAT_RGB565;
2638 default:
2639 case DISPPLANE_BGRX888:
2640 return DRM_FORMAT_XRGB8888;
2641 case DISPPLANE_RGBX888:
2642 return DRM_FORMAT_XBGR8888;
2643 case DISPPLANE_BGRX101010:
2644 return DRM_FORMAT_XRGB2101010;
2645 case DISPPLANE_RGBX101010:
2646 return DRM_FORMAT_XBGR2101010;
2647 }
2648 }
2649
2650 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2651 {
2652 switch (format) {
2653 case PLANE_CTL_FORMAT_RGB_565:
2654 return DRM_FORMAT_RGB565;
2655 case PLANE_CTL_FORMAT_NV12:
2656 return DRM_FORMAT_NV12;
2657 case PLANE_CTL_FORMAT_P010:
2658 return DRM_FORMAT_P010;
2659 case PLANE_CTL_FORMAT_P012:
2660 return DRM_FORMAT_P012;
2661 case PLANE_CTL_FORMAT_P016:
2662 return DRM_FORMAT_P016;
2663 case PLANE_CTL_FORMAT_Y210:
2664 return DRM_FORMAT_Y210;
2665 case PLANE_CTL_FORMAT_Y212:
2666 return DRM_FORMAT_Y212;
2667 case PLANE_CTL_FORMAT_Y216:
2668 return DRM_FORMAT_Y216;
2669 case PLANE_CTL_FORMAT_Y410:
2670 return DRM_FORMAT_XVYU2101010;
2671 case PLANE_CTL_FORMAT_Y412:
2672 return DRM_FORMAT_XVYU12_16161616;
2673 case PLANE_CTL_FORMAT_Y416:
2674 return DRM_FORMAT_XVYU16161616;
2675 default:
2676 case PLANE_CTL_FORMAT_XRGB_8888:
2677 if (rgb_order) {
2678 if (alpha)
2679 return DRM_FORMAT_ABGR8888;
2680 else
2681 return DRM_FORMAT_XBGR8888;
2682 } else {
2683 if (alpha)
2684 return DRM_FORMAT_ARGB8888;
2685 else
2686 return DRM_FORMAT_XRGB8888;
2687 }
2688 case PLANE_CTL_FORMAT_XRGB_2101010:
2689 if (rgb_order)
2690 return DRM_FORMAT_XBGR2101010;
2691 else
2692 return DRM_FORMAT_XRGB2101010;
2693 case PLANE_CTL_FORMAT_XRGB_16161616F:
2694 if (rgb_order) {
2695 if (alpha)
2696 return DRM_FORMAT_ABGR16161616F;
2697 else
2698 return DRM_FORMAT_XBGR16161616F;
2699 } else {
2700 if (alpha)
2701 return DRM_FORMAT_ARGB16161616F;
2702 else
2703 return DRM_FORMAT_XRGB16161616F;
2704 }
2705 }
2706 }
2707
2708 static bool
2709 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2710 struct intel_initial_plane_config *plane_config)
2711 {
2712 struct drm_device *dev = crtc->base.dev;
2713 struct drm_i915_private *dev_priv = to_i915(dev);
2714 struct drm_i915_gem_object *obj = NULL;
2715 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2716 struct drm_framebuffer *fb = &plane_config->fb->base;
2717 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2718 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2719 PAGE_SIZE);
2720
2721 size_aligned -= base_aligned;
2722
2723 if (plane_config->size == 0)
2724 return false;
2725
2726 /* If the FB is too big, just don't use it since fbdev is not very
2727 * important and we should probably use that space with FBC or other
2728 * features. */
2729 if (size_aligned * 2 > dev_priv->stolen_usable_size)
2730 return false;
2731
2732 switch (fb->modifier) {
2733 case DRM_FORMAT_MOD_LINEAR:
2734 case I915_FORMAT_MOD_X_TILED:
2735 case I915_FORMAT_MOD_Y_TILED:
2736 break;
2737 default:
2738 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2739 fb->modifier);
2740 return false;
2741 }
2742
2743 mutex_lock(&dev->struct_mutex);
2744 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2745 base_aligned,
2746 base_aligned,
2747 size_aligned);
2748 mutex_unlock(&dev->struct_mutex);
2749 if (!obj)
2750 return false;
2751
2752 switch (plane_config->tiling) {
2753 case I915_TILING_NONE:
2754 break;
2755 case I915_TILING_X:
2756 case I915_TILING_Y:
2757 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2758 break;
2759 default:
2760 MISSING_CASE(plane_config->tiling);
2761 goto out_unref_obj; /* don't leak the stolen object */
2762 }
2763
2764 mode_cmd.pixel_format = fb->format->format;
2765 mode_cmd.width = fb->width;
2766 mode_cmd.height = fb->height;
2767 mode_cmd.pitches[0] = fb->pitches[0];
2768 mode_cmd.modifier[0] = fb->modifier;
2769 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2770
2771 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2772 DRM_DEBUG_KMS("intel fb init failed\n");
2773 goto out_unref_obj;
2774 }
2775
2777 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2778 return true;
2779
2780 out_unref_obj:
2781 i915_gem_object_put(obj);
2782 return false;
2783 }
2784
2785 static void
2786 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2787 struct intel_plane_state *plane_state,
2788 bool visible)
2789 {
2790 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2791
2792 plane_state->base.visible = visible;
2793
2794 if (visible)
2795 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2796 else
2797 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2798 }
2799
2800 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
2801 {
2802 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2803 struct drm_plane *plane;
2804
2805 /*
2806 * Active_planes aliases if multiple "primary" or cursor planes
2807 * have been used on the same (or wrong) pipe. plane_mask uses
2808 * unique ids, hence we can use that to reconstruct active_planes.
2809 */
2810 crtc_state->active_planes = 0;
2811
2812 drm_for_each_plane_mask(plane, &dev_priv->drm,
2813 crtc_state->base.plane_mask)
2814 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2815 }
2816
2817 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2818 struct intel_plane *plane)
2819 {
2820 struct intel_crtc_state *crtc_state =
2821 to_intel_crtc_state(crtc->base.state);
2822 struct intel_plane_state *plane_state =
2823 to_intel_plane_state(plane->base.state);
2824
2825 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2826 plane->base.base.id, plane->base.name,
2827 crtc->base.base.id, crtc->base.name);
2828
2829 intel_set_plane_visible(crtc_state, plane_state, false);
2830 fixup_active_planes(crtc_state);
2831
2832 if (plane->id == PLANE_PRIMARY)
2833 intel_pre_disable_primary_noatomic(&crtc->base);
2834
2835 intel_disable_plane(plane, crtc_state);
2836 }
2837
2838 static void
2839 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2840 struct intel_initial_plane_config *plane_config)
2841 {
2842 struct drm_device *dev = intel_crtc->base.dev;
2843 struct drm_i915_private *dev_priv = to_i915(dev);
2844 struct drm_crtc *c;
2845 struct drm_i915_gem_object *obj;
2846 struct drm_plane *primary = intel_crtc->base.primary;
2847 struct drm_plane_state *plane_state = primary->state;
2848 struct intel_plane *intel_plane = to_intel_plane(primary);
2849 struct intel_plane_state *intel_state =
2850 to_intel_plane_state(plane_state);
2851 struct drm_framebuffer *fb;
2852
2853 if (!plane_config->fb)
2854 return;
2855
2856 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2857 fb = &plane_config->fb->base;
2858 goto valid_fb;
2859 }
2860
2861 kfree(plane_config->fb);
2862
2863 /*
2864 * Failed to alloc the obj, check to see if we should share
2865 * an fb with another CRTC instead
2866 */
2867 for_each_crtc(dev, c) {
2868 struct intel_plane_state *state;
2869
2870 if (c == &intel_crtc->base)
2871 continue;
2872
2873 if (!to_intel_crtc(c)->active)
2874 continue;
2875
2876 state = to_intel_plane_state(c->primary->state);
2877 if (!state->vma)
2878 continue;
2879
2880 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2881 fb = state->base.fb;
2882 drm_framebuffer_get(fb);
2883 goto valid_fb;
2884 }
2885 }
2886
2887 /*
2888 * We've failed to reconstruct the BIOS FB. Current display state
2889 * indicates that the primary plane is visible, but has a NULL FB,
2890 * which will lead to problems later if we don't fix it up. The
2891 * simplest solution is to just disable the primary plane now and
2892 * pretend the BIOS never had it enabled.
2893 */
2894 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2895
2896 return;
2897
2898 valid_fb:
2899 intel_state->base.rotation = plane_config->rotation;
2900 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2901 intel_state->base.rotation);
2902 intel_state->color_plane[0].stride =
2903 intel_fb_pitch(fb, 0, intel_state->base.rotation);
2904
2905 mutex_lock(&dev->struct_mutex);
2906 intel_state->vma =
2907 intel_pin_and_fence_fb_obj(fb,
2908 &intel_state->view,
2909 intel_plane_uses_fence(intel_state),
2910 &intel_state->flags);
2911 mutex_unlock(&dev->struct_mutex);
2912 if (IS_ERR(intel_state->vma)) {
2913 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2914 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2915
2916 intel_state->vma = NULL;
2917 drm_framebuffer_put(fb);
2918 return;
2919 }
2920
2921 obj = intel_fb_obj(fb);
2922 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
2923
2924 plane_state->src_x = 0;
2925 plane_state->src_y = 0;
2926 plane_state->src_w = fb->width << 16;
2927 plane_state->src_h = fb->height << 16;
2928
2929 plane_state->crtc_x = 0;
2930 plane_state->crtc_y = 0;
2931 plane_state->crtc_w = fb->width;
2932 plane_state->crtc_h = fb->height;
2933
2934 intel_state->base.src = drm_plane_state_src(plane_state);
2935 intel_state->base.dst = drm_plane_state_dest(plane_state);
2936
2937 if (i915_gem_object_is_tiled(obj))
2938 dev_priv->preserve_bios_swizzle = true;
2939
2940 plane_state->fb = fb;
2941 plane_state->crtc = &intel_crtc->base;
2942
2943 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2944 &obj->frontbuffer_bits);
2945 }
2946
2947 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2948 int color_plane,
2949 unsigned int rotation)
2950 {
2951 int cpp = fb->format->cpp[color_plane];
2952
2953 switch (fb->modifier) {
2954 case DRM_FORMAT_MOD_LINEAR:
2955 case I915_FORMAT_MOD_X_TILED:
2956 switch (cpp) {
2957 case 8:
2958 return 4096;
2959 case 4:
2960 case 2:
2961 case 1:
2962 return 8192;
2963 default:
2964 MISSING_CASE(cpp);
2965 break;
2966 }
2967 break;
2968 case I915_FORMAT_MOD_Y_TILED_CCS:
2969 case I915_FORMAT_MOD_Yf_TILED_CCS:
2970 /* FIXME AUX plane? */
2971 case I915_FORMAT_MOD_Y_TILED:
2972 case I915_FORMAT_MOD_Yf_TILED:
2973 switch (cpp) {
2974 case 8:
2975 return 2048;
2976 case 4:
2977 return 4096;
2978 case 2:
2979 case 1:
2980 return 8192;
2981 default:
2982 MISSING_CASE(cpp);
2983 break;
2984 }
2985 break;
2986 default:
2987 MISSING_CASE(fb->modifier);
2988 }
2989
2990 return 2048;
2991 }
2992
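/*
 * Walk the AUX (CCS) surface offset backwards one alignment step at a
 * time, folding each step into the aux x/y offsets, until the intra-tile
 * x/y match the main surface's (or no offset is left to give back).
 */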
2993 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
2994 int main_x, int main_y, u32 main_offset)
2995 {
2996 const struct drm_framebuffer *fb = plane_state->base.fb;
2997 int hsub = fb->format->hsub;
2998 int vsub = fb->format->vsub;
2999 int aux_x = plane_state->color_plane[1].x;
3000 int aux_y = plane_state->color_plane[1].y;
3001 u32 aux_offset = plane_state->color_plane[1].offset;
3002 u32 alignment = intel_surf_alignment(fb, 1);
3003
3004 while (aux_offset >= main_offset && aux_y <= main_y) {
3005 int x, y;
3006
3007 if (aux_x == main_x && aux_y == main_y)
3008 break;
3009
3010 if (aux_offset == 0)
3011 break;
3012
3013 x = aux_x / hsub;
3014 y = aux_y / vsub;
3015 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3016 aux_offset, aux_offset - alignment);
3017 aux_x = x * hsub + aux_x % hsub;
3018 aux_y = y * vsub + aux_y % vsub;
3019 }
3020
3021 if (aux_x != main_x || aux_y != main_y)
3022 return false;
3023
3024 plane_state->color_plane[1].offset = aux_offset;
3025 plane_state->color_plane[1].x = aux_x;
3026 plane_state->color_plane[1].y = aux_y;
3027
3028 return true;
3029 }
3030
3031 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3032 {
3033 const struct drm_framebuffer *fb = plane_state->base.fb;
3034 unsigned int rotation = plane_state->base.rotation;
3035 int x = plane_state->base.src.x1 >> 16;
3036 int y = plane_state->base.src.y1 >> 16;
3037 int w = drm_rect_width(&plane_state->base.src) >> 16;
3038 int h = drm_rect_height(&plane_state->base.src) >> 16;
3039 int max_width = skl_max_plane_width(fb, 0, rotation);
3040 int max_height = 4096;
3041 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3042
3043 if (w > max_width || h > max_height) {
3044 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3045 w, h, max_width, max_height);
3046 return -EINVAL;
3047 }
3048
3049 intel_add_fb_offsets(&x, &y, plane_state, 0);
3050 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3051 alignment = intel_surf_alignment(fb, 0);
3052
3053 /*
3054 * AUX surface offset is specified as the distance from the
3055 * main surface offset, and it must be non-negative. Make
3056 * sure that is what we will get.
3057 */
3058 if (offset > aux_offset)
3059 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3060 offset, aux_offset & ~(alignment - 1));
3061
3062 /*
3063 * When using an X-tiled surface, the plane blows up
3064 * if the x offset + width exceeds the stride.
3065 *
3066 * TODO: linear and Y-tiled seem fine, Yf untested.
3067 */
3068 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3069 int cpp = fb->format->cpp[0];
3070
3071 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3072 if (offset == 0) {
3073 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3074 return -EINVAL;
3075 }
3076
3077 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3078 offset, offset - alignment);
3079 }
3080 }
3081
3082 /*
3083 * The CCS AUX surface doesn't have its own x/y offsets, so we must make
3084 * sure they match the main surface x/y offsets.
3085 */
3086 if (is_ccs_modifier(fb->modifier)) {
3087 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3088 if (offset == 0)
3089 break;
3090
3091 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3092 offset, offset - alignment);
3093 }
3094
3095 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3096 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3097 return -EINVAL;
3098 }
3099 }
3100
3101 plane_state->color_plane[0].offset = offset;
3102 plane_state->color_plane[0].x = x;
3103 plane_state->color_plane[0].y = y;
3104
3105 return 0;
3106 }
3107
3108 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3109 {
3110 const struct drm_framebuffer *fb = plane_state->base.fb;
3111 unsigned int rotation = plane_state->base.rotation;
3112 int max_width = skl_max_plane_width(fb, 1, rotation);
3113 int max_height = 4096;
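/*
 * src coordinates are 16.16 fixed point; shifting by 17 instead of 16
 * also halves them for the 4:2:0 chroma plane.
 */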
3114 int x = plane_state->base.src.x1 >> 17;
3115 int y = plane_state->base.src.y1 >> 17;
3116 int w = drm_rect_width(&plane_state->base.src) >> 17;
3117 int h = drm_rect_height(&plane_state->base.src) >> 17;
3118 u32 offset;
3119
3120 intel_add_fb_offsets(&x, &y, plane_state, 1);
3121 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3122
3123 /* FIXME not quite sure how/if these apply to the chroma plane */
3124 if (w > max_width || h > max_height) {
3125 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3126 w, h, max_width, max_height);
3127 return -EINVAL;
3128 }
3129
3130 plane_state->color_plane[1].offset = offset;
3131 plane_state->color_plane[1].x = x;
3132 plane_state->color_plane[1].y = y;
3133
3134 return 0;
3135 }
3136
3137 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3138 {
3139 const struct drm_framebuffer *fb = plane_state->base.fb;
3140 int src_x = plane_state->base.src.x1 >> 16;
3141 int src_y = plane_state->base.src.y1 >> 16;
3142 int hsub = fb->format->hsub;
3143 int vsub = fb->format->vsub;
3144 int x = src_x / hsub;
3145 int y = src_y / vsub;
3146 u32 offset;
3147
3148 intel_add_fb_offsets(&x, &y, plane_state, 1);
3149 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3150
3151 plane_state->color_plane[1].offset = offset;
3152 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3153 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3154
3155 return 0;
3156 }
3157
3158 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3159 {
3160 const struct drm_framebuffer *fb = plane_state->base.fb;
3161 unsigned int rotation = plane_state->base.rotation;
3162 int ret;
3163
3164 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3165 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3166 plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
3167
3168 ret = intel_plane_check_stride(plane_state);
3169 if (ret)
3170 return ret;
3171
3172 if (!plane_state->base.visible)
3173 return 0;
3174
3175 /* Rotate src coordinates to match rotated GTT view */
3176 if (drm_rotation_90_or_270(rotation))
3177 drm_rect_rotate(&plane_state->base.src,
3178 fb->width << 16, fb->height << 16,
3179 DRM_MODE_ROTATE_270);
3180
3181 /*
3182 * Handle the AUX surface first since
3183 * the main surface setup depends on it.
3184 */
3185 if (is_planar_yuv_format(fb->format->format)) {
3186 ret = skl_check_nv12_aux_surface(plane_state);
3187 if (ret)
3188 return ret;
3189 } else if (is_ccs_modifier(fb->modifier)) {
3190 ret = skl_check_ccs_aux_surface(plane_state);
3191 if (ret)
3192 return ret;
3193 } else {
3194 plane_state->color_plane[1].offset = ~0xfff;
3195 plane_state->color_plane[1].x = 0;
3196 plane_state->color_plane[1].y = 0;
3197 }
3198
3199 ret = skl_check_main_surface(plane_state);
3200 if (ret)
3201 return ret;
3202
3203 return 0;
3204 }
3205
3206 unsigned int
3207 i9xx_plane_max_stride(struct intel_plane *plane,
3208 u32 pixel_format, u64 modifier,
3209 unsigned int rotation)
3210 {
3211 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3212
3213 if (!HAS_GMCH(dev_priv)) {
3214 return 32*1024;
3215 } else if (INTEL_GEN(dev_priv) >= 4) {
3216 if (modifier == I915_FORMAT_MOD_X_TILED)
3217 return 16*1024;
3218 else
3219 return 32*1024;
3220 } else if (INTEL_GEN(dev_priv) >= 3) {
3221 if (modifier == I915_FORMAT_MOD_X_TILED)
3222 return 8*1024;
3223 else
3224 return 16*1024;
3225 } else {
3226 if (plane->i9xx_plane == PLANE_C)
3227 return 4*1024;
3228 else
3229 return 8*1024;
3230 }
3231 }
3232
3233 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3234 {
3235 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3236 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3237 u32 dspcntr = 0;
3238
3239 if (crtc_state->gamma_enable)
3240 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3241
3242 if (crtc_state->csc_enable)
3243 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3244
3245 if (INTEL_GEN(dev_priv) < 5)
3246 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3247
3248 return dspcntr;
3249 }
3250
3251 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3252 const struct intel_plane_state *plane_state)
3253 {
3254 struct drm_i915_private *dev_priv =
3255 to_i915(plane_state->base.plane->dev);
3256 const struct drm_framebuffer *fb = plane_state->base.fb;
3257 unsigned int rotation = plane_state->base.rotation;
3258 u32 dspcntr;
3259
3260 dspcntr = DISPLAY_PLANE_ENABLE;
3261
3262 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3263 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3264 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3265
3266 switch (fb->format->format) {
3267 case DRM_FORMAT_C8:
3268 dspcntr |= DISPPLANE_8BPP;
3269 break;
3270 case DRM_FORMAT_XRGB1555:
3271 dspcntr |= DISPPLANE_BGRX555;
3272 break;
3273 case DRM_FORMAT_RGB565:
3274 dspcntr |= DISPPLANE_BGRX565;
3275 break;
3276 case DRM_FORMAT_XRGB8888:
3277 dspcntr |= DISPPLANE_BGRX888;
3278 break;
3279 case DRM_FORMAT_XBGR8888:
3280 dspcntr |= DISPPLANE_RGBX888;
3281 break;
3282 case DRM_FORMAT_XRGB2101010:
3283 dspcntr |= DISPPLANE_BGRX101010;
3284 break;
3285 case DRM_FORMAT_XBGR2101010:
3286 dspcntr |= DISPPLANE_RGBX101010;
3287 break;
3288 default:
3289 MISSING_CASE(fb->format->format);
3290 return 0;
3291 }
3292
3293 if (INTEL_GEN(dev_priv) >= 4 &&
3294 fb->modifier == I915_FORMAT_MOD_X_TILED)
3295 dspcntr |= DISPPLANE_TILED;
3296
3297 if (rotation & DRM_MODE_ROTATE_180)
3298 dspcntr |= DISPPLANE_ROTATE_180;
3299
3300 if (rotation & DRM_MODE_REFLECT_X)
3301 dspcntr |= DISPPLANE_MIRROR;
3302
3303 return dspcntr;
3304 }
3305
3306 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3307 {
3308 struct drm_i915_private *dev_priv =
3309 to_i915(plane_state->base.plane->dev);
3310 const struct drm_framebuffer *fb = plane_state->base.fb;
3311 unsigned int rotation = plane_state->base.rotation;
3312 int src_x = plane_state->base.src.x1 >> 16;
3313 int src_y = plane_state->base.src.y1 >> 16;
3314 u32 offset;
3315 int ret;
3316
3317 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3318 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3319
3320 ret = intel_plane_check_stride(plane_state);
3321 if (ret)
3322 return ret;
3323
3324 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3325
3326 if (INTEL_GEN(dev_priv) >= 4)
3327 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3328 plane_state, 0);
3329 else
3330 offset = 0;
3331
3332 /* HSW/BDW do this automagically in hardware */
3333 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3334 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3335 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3336
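/*
 * With 180 degree rotation (or X mirror) the hardware scans the
 * buffer backwards, so the base must point at the last pixel
 * (last line) of the visible area.
 */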
3337 if (rotation & DRM_MODE_ROTATE_180) {
3338 src_x += src_w - 1;
3339 src_y += src_h - 1;
3340 } else if (rotation & DRM_MODE_REFLECT_X) {
3341 src_x += src_w - 1;
3342 }
3343 }
3344
3345 plane_state->color_plane[0].offset = offset;
3346 plane_state->color_plane[0].x = src_x;
3347 plane_state->color_plane[0].y = src_y;
3348
3349 return 0;
3350 }
3351
3352 static int
3353 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3354 struct intel_plane_state *plane_state)
3355 {
3356 int ret;
3357
3358 ret = chv_plane_check_rotation(plane_state);
3359 if (ret)
3360 return ret;
3361
3362 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3363 &crtc_state->base,
3364 DRM_PLANE_HELPER_NO_SCALING,
3365 DRM_PLANE_HELPER_NO_SCALING,
3366 false, true);
3367 if (ret)
3368 return ret;
3369
3370 if (!plane_state->base.visible)
3371 return 0;
3372
3373 ret = intel_plane_check_src_coordinates(plane_state);
3374 if (ret)
3375 return ret;
3376
3377 ret = i9xx_check_plane_surface(plane_state);
3378 if (ret)
3379 return ret;
3380
3381 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3382
3383 return 0;
3384 }
3385
3386 static void i9xx_update_plane(struct intel_plane *plane,
3387 const struct intel_crtc_state *crtc_state,
3388 const struct intel_plane_state *plane_state)
3389 {
3390 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3391 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3392 u32 linear_offset;
3393 int x = plane_state->color_plane[0].x;
3394 int y = plane_state->color_plane[0].y;
3395 unsigned long irqflags;
3396 u32 dspaddr_offset;
3397 u32 dspcntr;
3398
3399 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3400
3401 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3402
3403 if (INTEL_GEN(dev_priv) >= 4)
3404 dspaddr_offset = plane_state->color_plane[0].offset;
3405 else
3406 dspaddr_offset = linear_offset;
3407
3408 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3409
3410 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3411
3412 if (INTEL_GEN(dev_priv) < 4) {
3413 /* pipesrc and dspsize control the size that is scaled from,
3414 * which should always be the user's requested size.
3415 */
3416 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3417 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3418 ((crtc_state->pipe_src_h - 1) << 16) |
3419 (crtc_state->pipe_src_w - 1));
3420 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3421 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3422 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3423 ((crtc_state->pipe_src_h - 1) << 16) |
3424 (crtc_state->pipe_src_w - 1));
3425 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3426 }
3427
3428 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3429 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3430 } else if (INTEL_GEN(dev_priv) >= 4) {
3431 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3432 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3433 }
3434
3435 /*
3436 * The control register self-arms if the plane was previously
3437 * disabled. Try to make the plane enable atomic by writing
3438 * the control register just before the surface register.
3439 */
3440 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3441 if (INTEL_GEN(dev_priv) >= 4)
3442 I915_WRITE_FW(DSPSURF(i9xx_plane),
3443 intel_plane_ggtt_offset(plane_state) +
3444 dspaddr_offset);
3445 else
3446 I915_WRITE_FW(DSPADDR(i9xx_plane),
3447 intel_plane_ggtt_offset(plane_state) +
3448 dspaddr_offset);
3449
3450 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3451 }
3452
3453 static void i9xx_disable_plane(struct intel_plane *plane,
3454 const struct intel_crtc_state *crtc_state)
3455 {
3456 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3457 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3458 unsigned long irqflags;
3459 u32 dspcntr;
3460
3461 /*
3462 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3463 * enable on ilk+ affect the pipe bottom color as
3464 * well, so we must configure them even if the plane
3465 * is disabled.
3466 *
3467 * On pre-g4x there is no way to gamma correct the
3468 * pipe bottom color but we'll keep on doing this
3469 * anyway so that the crtc state readout works correctly.
3470 */
3471 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3472
3473 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3474
3475 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3476 if (INTEL_GEN(dev_priv) >= 4)
3477 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3478 else
3479 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3480
3481 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3482 }
3483
3484 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3485 enum pipe *pipe)
3486 {
3487 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3488 enum intel_display_power_domain power_domain;
3489 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3490 intel_wakeref_t wakeref;
3491 bool ret;
3492 u32 val;
3493
3494 /*
3495 * Not 100% correct for planes that can move between pipes,
3496 * but that's only the case for gen2-4 which don't have any
3497 * display power wells.
3498 */
3499 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3500 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3501 if (!wakeref)
3502 return false;
3503
3504 val = I915_READ(DSPCNTR(i9xx_plane));
3505
3506 ret = val & DISPLAY_PLANE_ENABLE;
3507
3508 if (INTEL_GEN(dev_priv) >= 5)
3509 *pipe = plane->pipe;
3510 else
3511 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3512 DISPPLANE_SEL_PIPE_SHIFT;
3513
3514 intel_display_power_put(dev_priv, power_domain, wakeref);
3515
3516 return ret;
3517 }
3518
3519 static u32
3520 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3521 {
3522 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3523 return 64;
3524 else
3525 return intel_tile_width_bytes(fb, color_plane);
3526 }
3527
3528 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3529 {
3530 struct drm_device *dev = intel_crtc->base.dev;
3531 struct drm_i915_private *dev_priv = to_i915(dev);
3532
3533 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3534 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3535 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3536 }
3537
3538 /*
3539 * This function detaches (aka. unbinds) unused scalers in hardware
3540 */
3541 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3542 {
3543 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3544 const struct intel_crtc_scaler_state *scaler_state =
3545 &crtc_state->scaler_state;
3546 int i;
3547
3548 /* loop through and disable scalers that aren't in use */
3549 for (i = 0; i < intel_crtc->num_scalers; i++) {
3550 if (!scaler_state->scalers[i].in_use)
3551 skl_detach_scaler(intel_crtc, i);
3552 }
3553 }
3554
3555 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3556 int color_plane, unsigned int rotation)
3557 {
3558 /*
3559 * The stride is either expressed as a multiple of 64-byte chunks for
3560 * linear buffers or in number of tiles for tiled buffers.
3561 */
3562 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3563 return 64;
3564 else if (drm_rotation_90_or_270(rotation))
3565 return intel_tile_height(fb, color_plane);
3566 else
3567 return intel_tile_width_bytes(fb, color_plane);
3568 }
3569
3570 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3571 int color_plane)
3572 {
3573 const struct drm_framebuffer *fb = plane_state->base.fb;
3574 unsigned int rotation = plane_state->base.rotation;
3575 u32 stride = plane_state->color_plane[color_plane].stride;
3576
3577 if (color_plane >= fb->format->num_planes)
3578 return 0;
3579
3580 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3581 }
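/*
 * Editor's note, worked example: a linear fb with a 16384-byte stride
 * is programmed as 16384 / 64 = 256, while the same stride with
 * X-tiling (512-byte tile rows) is programmed as 16384 / 512 = 32 tiles.
 */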
3582
3583 static u32 skl_plane_ctl_format(u32 pixel_format)
3584 {
3585 switch (pixel_format) {
3586 case DRM_FORMAT_C8:
3587 return PLANE_CTL_FORMAT_INDEXED;
3588 case DRM_FORMAT_RGB565:
3589 return PLANE_CTL_FORMAT_RGB_565;
3590 case DRM_FORMAT_XBGR8888:
3591 case DRM_FORMAT_ABGR8888:
3592 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3593 case DRM_FORMAT_XRGB8888:
3594 case DRM_FORMAT_ARGB8888:
3595 return PLANE_CTL_FORMAT_XRGB_8888;
3596 case DRM_FORMAT_XRGB2101010:
3597 return PLANE_CTL_FORMAT_XRGB_2101010;
3598 case DRM_FORMAT_XBGR2101010:
3599 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3600 case DRM_FORMAT_XBGR16161616F:
3601 case DRM_FORMAT_ABGR16161616F:
3602 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3603 case DRM_FORMAT_XRGB16161616F:
3604 case DRM_FORMAT_ARGB16161616F:
3605 return PLANE_CTL_FORMAT_XRGB_16161616F;
3606 case DRM_FORMAT_YUYV:
3607 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3608 case DRM_FORMAT_YVYU:
3609 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3610 case DRM_FORMAT_UYVY:
3611 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3612 case DRM_FORMAT_VYUY:
3613 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3614 case DRM_FORMAT_NV12:
3615 return PLANE_CTL_FORMAT_NV12;
3616 case DRM_FORMAT_P010:
3617 return PLANE_CTL_FORMAT_P010;
3618 case DRM_FORMAT_P012:
3619 return PLANE_CTL_FORMAT_P012;
3620 case DRM_FORMAT_P016:
3621 return PLANE_CTL_FORMAT_P016;
3622 case DRM_FORMAT_Y210:
3623 return PLANE_CTL_FORMAT_Y210;
3624 case DRM_FORMAT_Y212:
3625 return PLANE_CTL_FORMAT_Y212;
3626 case DRM_FORMAT_Y216:
3627 return PLANE_CTL_FORMAT_Y216;
3628 case DRM_FORMAT_XVYU2101010:
3629 return PLANE_CTL_FORMAT_Y410;
3630 case DRM_FORMAT_XVYU12_16161616:
3631 return PLANE_CTL_FORMAT_Y412;
3632 case DRM_FORMAT_XVYU16161616:
3633 return PLANE_CTL_FORMAT_Y416;
3634 default:
3635 MISSING_CASE(pixel_format);
3636 }
3637
3638 return 0;
3639 }
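
/*
 * Example mapping, read straight off the switch above: the alpha and
 * non-alpha variants of a format share one hardware encoding, so
 * skl_plane_ctl_format(DRM_FORMAT_ABGR8888) returns
 * PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX, the same value as
 * for DRM_FORMAT_XBGR8888; alpha blending is configured separately via
 * skl_plane_ctl_alpha().
 */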
3640
3641 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3642 {
3643 if (!plane_state->base.fb->format->has_alpha)
3644 return PLANE_CTL_ALPHA_DISABLE;
3645
3646 switch (plane_state->base.pixel_blend_mode) {
3647 case DRM_MODE_BLEND_PIXEL_NONE:
3648 return PLANE_CTL_ALPHA_DISABLE;
3649 case DRM_MODE_BLEND_PREMULTI:
3650 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3651 case DRM_MODE_BLEND_COVERAGE:
3652 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3653 default:
3654 MISSING_CASE(plane_state->base.pixel_blend_mode);
3655 return PLANE_CTL_ALPHA_DISABLE;
3656 }
3657 }
3658
3659 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3660 {
3661 if (!plane_state->base.fb->format->has_alpha)
3662 return PLANE_COLOR_ALPHA_DISABLE;
3663
3664 switch (plane_state->base.pixel_blend_mode) {
3665 case DRM_MODE_BLEND_PIXEL_NONE:
3666 return PLANE_COLOR_ALPHA_DISABLE;
3667 case DRM_MODE_BLEND_PREMULTI:
3668 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3669 case DRM_MODE_BLEND_COVERAGE:
3670 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3671 default:
3672 MISSING_CASE(plane_state->base.pixel_blend_mode);
3673 return PLANE_COLOR_ALPHA_DISABLE;
3674 }
3675 }
3676
3677 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
3678 {
3679 switch (fb_modifier) {
3680 case DRM_FORMAT_MOD_LINEAR:
3681 break;
3682 case I915_FORMAT_MOD_X_TILED:
3683 return PLANE_CTL_TILED_X;
3684 case I915_FORMAT_MOD_Y_TILED:
3685 return PLANE_CTL_TILED_Y;
3686 case I915_FORMAT_MOD_Y_TILED_CCS:
3687 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3688 case I915_FORMAT_MOD_Yf_TILED:
3689 return PLANE_CTL_TILED_YF;
3690 case I915_FORMAT_MOD_Yf_TILED_CCS:
3691 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3692 default:
3693 MISSING_CASE(fb_modifier);
3694 }
3695
3696 return 0;
3697 }
3698
3699 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3700 {
3701 switch (rotate) {
3702 case DRM_MODE_ROTATE_0:
3703 break;
3704 /*
3705 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
3706 * while i915 HW rotation is clockwise; that's why the values are swapped.
3707 */
3708 case DRM_MODE_ROTATE_90:
3709 return PLANE_CTL_ROTATE_270;
3710 case DRM_MODE_ROTATE_180:
3711 return PLANE_CTL_ROTATE_180;
3712 case DRM_MODE_ROTATE_270:
3713 return PLANE_CTL_ROTATE_90;
3714 default:
3715 MISSING_CASE(rotate);
3716 }
3717
3718 return 0;
3719 }
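
/*
 * Example of the swap above (illustrative): a userspace request for
 * DRM_MODE_ROTATE_90, i.e. 90 degrees counter-clockwise, must be
 * programmed as a 270 degree clockwise rotation in hardware, so
 * skl_plane_ctl_rotate(DRM_MODE_ROTATE_90) returns PLANE_CTL_ROTATE_270,
 * and vice versa for DRM_MODE_ROTATE_270.
 */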
3720
3721 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3722 {
3723 switch (reflect) {
3724 case 0:
3725 break;
3726 case DRM_MODE_REFLECT_X:
3727 return PLANE_CTL_FLIP_HORIZONTAL;
3728 case DRM_MODE_REFLECT_Y:
3729 default:
3730 MISSING_CASE(reflect);
3731 }
3732
3733 return 0;
3734 }
3735
3736 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3737 {
3738 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3739 u32 plane_ctl = 0;
3740
3741 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3742 return plane_ctl;
3743
3744 if (crtc_state->gamma_enable)
3745 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
3746
3747 if (crtc_state->csc_enable)
3748 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
3749
3750 return plane_ctl;
3751 }
3752
3753 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3754 const struct intel_plane_state *plane_state)
3755 {
3756 struct drm_i915_private *dev_priv =
3757 to_i915(plane_state->base.plane->dev);
3758 const struct drm_framebuffer *fb = plane_state->base.fb;
3759 unsigned int rotation = plane_state->base.rotation;
3760 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
3761 u32 plane_ctl;
3762
3763 plane_ctl = PLANE_CTL_ENABLE;
3764
3765 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3766 plane_ctl |= skl_plane_ctl_alpha(plane_state);
3767 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3768
3769 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3770 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
3771
3772 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3773 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
3774 }
3775
3776 plane_ctl |= skl_plane_ctl_format(fb->format->format);
3777 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3778 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
3779
3780 if (INTEL_GEN(dev_priv) >= 10)
3781 plane_ctl |= cnl_plane_ctl_flip(rotation &
3782 DRM_MODE_REFLECT_MASK);
3783
3784 if (key->flags & I915_SET_COLORKEY_DESTINATION)
3785 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
3786 else if (key->flags & I915_SET_COLORKEY_SOURCE)
3787 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
3788
3789 return plane_ctl;
3790 }
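
/*
 * Usage sketch: the crtc-wide bits and the per-plane bits are computed
 * separately and ORed together when PLANE_CTL is programmed. The helper
 * below is hypothetical (not part of this file) and only illustrates
 * that composition:
 */
static u32 example_plane_ctl(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state)
{
	/* crtc-wide gamma/CSC bits plus the per-plane format/tiling bits */
	return skl_plane_ctl_crtc(crtc_state) |
	       skl_plane_ctl(crtc_state, plane_state);
}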
3791
3792 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3793 {
3794 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3795 u32 plane_color_ctl = 0;
3796
3797 if (INTEL_GEN(dev_priv) >= 11)
3798 return plane_color_ctl;
3799
3800 if (crtc_state->gamma_enable)
3801 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3802
3803 if (crtc_state->csc_enable)
3804 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3805
3806 return plane_color_ctl;
3807 }
3808
3809 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3810 const struct intel_plane_state *plane_state)
3811 {
3812 struct drm_i915_private *dev_priv =
3813 to_i915(plane_state->base.plane->dev);
3814 const struct drm_framebuffer *fb = plane_state->base.fb;
3815 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3816 u32 plane_color_ctl = 0;
3817
3818 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3819 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
3820
3821 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
3822 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3823 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3824 else
3825 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
3826
3827 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3828 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3829 } else if (fb->format->is_yuv) {
3830 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
3831 }
3832
3833 return plane_color_ctl;
3834 }
3835
3836 static int
3837 __intel_display_resume(struct drm_device *dev,
3838 struct drm_atomic_state *state,
3839 struct drm_modeset_acquire_ctx *ctx)
3840 {
3841 struct drm_crtc_state *crtc_state;
3842 struct drm_crtc *crtc;
3843 int i, ret;
3844
3845 intel_modeset_setup_hw_state(dev, ctx);
3846 i915_redisable_vga(to_i915(dev));
3847
3848 if (!state)
3849 return 0;
3850
3851 /*
3852 * We've duplicated the state; pointers to the old state are invalid.
3853 *
3854 * Don't attempt to use the old state until we commit the duplicated state.
3855 */
3856 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
3857 /*
3858 * Force recalculation even if we restore
3859 * current state. With fast modeset this may not result
3860 * in a modeset when the state is compatible.
3861 */
3862 crtc_state->mode_changed = true;
3863 }
3864
3865 /* ignore any reset values/BIOS leftovers in the WM registers */
3866 if (!HAS_GMCH(to_i915(dev)))
3867 to_intel_atomic_state(state)->skip_intermediate_wm = true;
3868
3869 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
3870
3871 WARN_ON(ret == -EDEADLK);
3872 return ret;
3873 }
3874
3875 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3876 {
3877 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3878 intel_has_gpu_reset(dev_priv));
3879 }
3880
3881 void intel_prepare_reset(struct drm_i915_private *dev_priv)
3882 {
3883 struct drm_device *dev = &dev_priv->drm;
3884 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3885 struct drm_atomic_state *state;
3886 int ret;
3887
3888 /* reset doesn't touch the display */
3889 if (!i915_modparams.force_reset_modeset_test &&
3890 !gpu_reset_clobbers_display(dev_priv))
3891 return;
3892
3893 /* We have a modeset vs reset deadlock, defensively unbreak it. */
3894 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
3895 wake_up_all(&dev_priv->gpu_error.wait_queue);
3896
3897 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
3898 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
3899 i915_gem_set_wedged(dev_priv);
3900 }
3901
3902 /*
3903 * Need mode_config.mutex so that we don't
3904 * trample ongoing ->detect() and whatnot.
3905 */
3906 mutex_lock(&dev->mode_config.mutex);
3907 drm_modeset_acquire_init(ctx, 0);
3908 while (1) {
3909 ret = drm_modeset_lock_all_ctx(dev, ctx);
3910 if (ret != -EDEADLK)
3911 break;
3912
3913 drm_modeset_backoff(ctx);
3914 }
3915 /*
3916 * Disabling the crtcs gracefully seems nicer. Also the
3917 * g33 docs say we should at least disable all the planes.
3918 */
3919 state = drm_atomic_helper_duplicate_state(dev, ctx);
3920 if (IS_ERR(state)) {
3921 ret = PTR_ERR(state);
3922 DRM_ERROR("Duplicating state failed with %i\n", ret);
3923 return;
3924 }
3925
3926 ret = drm_atomic_helper_disable_all(dev, ctx);
3927 if (ret) {
3928 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
3929 drm_atomic_state_put(state);
3930 return;
3931 }
3932
3933 dev_priv->modeset_restore_state = state;
3934 state->acquire_ctx = ctx;
3935 }
3936
3937 void intel_finish_reset(struct drm_i915_private *dev_priv)
3938 {
3939 struct drm_device *dev = &dev_priv->drm;
3940 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3941 struct drm_atomic_state *state;
3942 int ret;
3943
3944 /* reset doesn't touch the display */
3945 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
3946 return;
3947
3948 state = fetch_and_zero(&dev_priv->modeset_restore_state);
3949 if (!state)
3950 goto unlock;
3951
3952 /* reset doesn't touch the display */
3953 if (!gpu_reset_clobbers_display(dev_priv)) {
3954 /* for testing only restore the display */
3955 ret = __intel_display_resume(dev, state, ctx);
3956 if (ret)
3957 DRM_ERROR("Restoring old state failed with %i\n", ret);
3958 } else {
3959 /*
3960 * The display has been reset as well,
3961 * so need a full re-initialization.
3962 */
3963 intel_pps_unlock_regs_wa(dev_priv);
3964 intel_modeset_init_hw(dev);
3965 intel_init_clock_gating(dev_priv);
3966
3967 spin_lock_irq(&dev_priv->irq_lock);
3968 if (dev_priv->display.hpd_irq_setup)
3969 dev_priv->display.hpd_irq_setup(dev_priv);
3970 spin_unlock_irq(&dev_priv->irq_lock);
3971
3972 ret = __intel_display_resume(dev, state, ctx);
3973 if (ret)
3974 DRM_ERROR("Restoring old state failed with %i\n", ret);
3975
3976 intel_hpd_init(dev_priv);
3977 }
3978
3979 drm_atomic_state_put(state);
3980 unlock:
3981 drm_modeset_drop_locks(ctx);
3982 drm_modeset_acquire_fini(ctx);
3983 mutex_unlock(&dev->mode_config.mutex);
3984
3985 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
3986 }
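
/*
 * For orientation, a simplified sketch of how the two halves above
 * bracket a GPU reset; do_gpu_reset() is a hypothetical placeholder for
 * the actual reset path:
 *
 *	intel_prepare_reset(dev_priv);	// take locks, duplicate and
 *					// disable the display state
 *	do_gpu_reset(dev_priv);		// the reset itself
 *	intel_finish_reset(dev_priv);	// restore state, drop locks
 *
 * intel_finish_reset() must always follow intel_prepare_reset(), even on
 * failure, since the prepare step leaves the modeset locks held.
 */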
3987
3988 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
3989 {
3990 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3991 enum pipe pipe = crtc->pipe;
3992 u32 tmp;
3993
3994 tmp = I915_READ(PIPE_CHICKEN(pipe));
3995
3996 /*
3997 * Display WA #1153: icl
3998 * enable hardware to bypass the alpha math
3999 * and rounding for per-pixel values 00 and 0xff
4000 */
4001 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4002 /*
4003 * Display WA #1605353570: icl
4004 * Set the pixel rounding bit to 1 for allowing
4005 * passthrough of Frame buffer pixels unmodified
4006 * across pipe
4007 */
4008 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4009 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4010 }
4011
4012 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4013 const struct intel_crtc_state *new_crtc_state)
4014 {
4015 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
4016 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4017
4018 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
4019 crtc->base.mode = new_crtc_state->base.mode;
4020
4021 /*
4022 * Update pipe size and adjust fitter if needed: the reason for this is
4023 * that in compute_mode_changes we check the native mode (not the pfit
4024 * mode) to see if we can flip rather than do a full mode set. In the
4025 * fastboot case, we'll flip, but if we don't update the pipesrc and
4026 * pfit state, we'll end up with a big fb scanned out into the wrong
4027 * sized surface.
4028 */
4029
4030 I915_WRITE(PIPESRC(crtc->pipe),
4031 ((new_crtc_state->pipe_src_w - 1) << 16) |
4032 (new_crtc_state->pipe_src_h - 1));
4033
4034 /* on skylake this is done by detaching scalers */
4035 if (INTEL_GEN(dev_priv) >= 9) {
4036 skl_detach_scalers(new_crtc_state);
4037
4038 if (new_crtc_state->pch_pfit.enabled)
4039 skylake_pfit_enable(new_crtc_state);
4040 } else if (HAS_PCH_SPLIT(dev_priv)) {
4041 if (new_crtc_state->pch_pfit.enabled)
4042 ironlake_pfit_enable(new_crtc_state);
4043 else if (old_crtc_state->pch_pfit.enabled)
4044 ironlake_pfit_disable(old_crtc_state);
4045 }
4046
4047 if (INTEL_GEN(dev_priv) >= 11)
4048 icl_set_pipe_chicken(crtc);
4049 }
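
/*
 * PIPESRC packing, worked example (values illustrative): for a
 * 1920x1080 source the register value written above is
 * ((1920 - 1) << 16) | (1080 - 1) = 0x077f0437.
 */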
4050
4051 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4052 {
4053 struct drm_device *dev = crtc->base.dev;
4054 struct drm_i915_private *dev_priv = to_i915(dev);
4055 int pipe = crtc->pipe;
4056 i915_reg_t reg;
4057 u32 temp;
4058
4059 /* enable normal train */
4060 reg = FDI_TX_CTL(pipe);
4061 temp = I915_READ(reg);
4062 if (IS_IVYBRIDGE(dev_priv)) {
4063 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4064 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4065 } else {
4066 temp &= ~FDI_LINK_TRAIN_NONE;
4067 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4068 }
4069 I915_WRITE(reg, temp);
4070
4071 reg = FDI_RX_CTL(pipe);
4072 temp = I915_READ(reg);
4073 if (HAS_PCH_CPT(dev_priv)) {
4074 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4075 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4076 } else {
4077 temp &= ~FDI_LINK_TRAIN_NONE;
4078 temp |= FDI_LINK_TRAIN_NONE;
4079 }
4080 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4081
4082 /* wait one idle pattern time */
4083 POSTING_READ(reg);
4084 udelay(1000);
4085
4086 /* IVB wants error correction enabled */
4087 if (IS_IVYBRIDGE(dev_priv))
4088 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4089 FDI_FE_ERRC_ENABLE);
4090 }
4091
4092 /* The FDI link training functions for ILK/Ibexpeak. */
4093 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4094 const struct intel_crtc_state *crtc_state)
4095 {
4096 struct drm_device *dev = crtc->base.dev;
4097 struct drm_i915_private *dev_priv = to_i915(dev);
4098 int pipe = crtc->pipe;
4099 i915_reg_t reg;
4100 u32 temp, tries;
4101
4102 /* FDI needs bits from pipe first */
4103 assert_pipe_enabled(dev_priv, pipe);
4104
4105 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4106 for the training result */
4107 reg = FDI_RX_IMR(pipe);
4108 temp = I915_READ(reg);
4109 temp &= ~FDI_RX_SYMBOL_LOCK;
4110 temp &= ~FDI_RX_BIT_LOCK;
4111 I915_WRITE(reg, temp);
4112 I915_READ(reg);
4113 udelay(150);
4114
4115 /* enable CPU FDI TX and PCH FDI RX */
4116 reg = FDI_TX_CTL(pipe);
4117 temp = I915_READ(reg);
4118 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4119 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4120 temp &= ~FDI_LINK_TRAIN_NONE;
4121 temp |= FDI_LINK_TRAIN_PATTERN_1;
4122 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4123
4124 reg = FDI_RX_CTL(pipe);
4125 temp = I915_READ(reg);
4126 temp &= ~FDI_LINK_TRAIN_NONE;
4127 temp |= FDI_LINK_TRAIN_PATTERN_1;
4128 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4129
4130 POSTING_READ(reg);
4131 udelay(150);
4132
4133 /* Ironlake workaround, enable clock pointer after FDI enable */
4134 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4135 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4136 FDI_RX_PHASE_SYNC_POINTER_EN);
4137
4138 reg = FDI_RX_IIR(pipe);
4139 for (tries = 0; tries < 5; tries++) {
4140 temp = I915_READ(reg);
4141 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4142
4143 if (temp & FDI_RX_BIT_LOCK) {
4144 DRM_DEBUG_KMS("FDI train 1 done.\n");
4145 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4146 break;
4147 }
4148 }
4149 if (tries == 5)
4150 DRM_ERROR("FDI train 1 fail!\n");
4151
4152 /* Train 2 */
4153 reg = FDI_TX_CTL(pipe);
4154 temp = I915_READ(reg);
4155 temp &= ~FDI_LINK_TRAIN_NONE;
4156 temp |= FDI_LINK_TRAIN_PATTERN_2;
4157 I915_WRITE(reg, temp);
4158
4159 reg = FDI_RX_CTL(pipe);
4160 temp = I915_READ(reg);
4161 temp &= ~FDI_LINK_TRAIN_NONE;
4162 temp |= FDI_LINK_TRAIN_PATTERN_2;
4163 I915_WRITE(reg, temp);
4164
4165 POSTING_READ(reg);
4166 udelay(150);
4167
4168 reg = FDI_RX_IIR(pipe);
4169 for (tries = 0; tries < 5; tries++) {
4170 temp = I915_READ(reg);
4171 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4172
4173 if (temp & FDI_RX_SYMBOL_LOCK) {
4174 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4175 DRM_DEBUG_KMS("FDI train 2 done.\n");
4176 break;
4177 }
4178 }
4179 if (tries == 5)
4180 DRM_ERROR("FDI train 2 fail!\n");
4181
4182 DRM_DEBUG_KMS("FDI train done\n");
4183
4184 }
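
/*
 * Both training stages above share the same poll shape: kick a training
 * pattern, then spin on FDI_RX_IIR until the lock bit appears and write
 * it back to clear it. A minimal sketch of that step as a hypothetical
 * helper (not part of the driver):
 */
static bool example_fdi_wait_for_lock(struct drm_i915_private *dev_priv,
				      i915_reg_t iir, u32 lock_bit)
{
	u32 temp;
	int tries;

	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(iir);
		if (temp & lock_bit) {
			/* the lock bit is sticky; write it back to clear */
			I915_WRITE(iir, temp | lock_bit);
			return true;
		}
	}

	return false;
}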
4185
4186 static const int snb_b_fdi_train_param[] = {
4187 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4188 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4189 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4190 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4191 };
4192
4193 /* The FDI link training functions for SNB/Cougarpoint. */
4194 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4195 const struct intel_crtc_state *crtc_state)
4196 {
4197 struct drm_device *dev = crtc->base.dev;
4198 struct drm_i915_private *dev_priv = to_i915(dev);
4199 int pipe = crtc->pipe;
4200 i915_reg_t reg;
4201 u32 temp, i, retry;
4202
4203 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4204 for the training result */
4205 reg = FDI_RX_IMR(pipe);
4206 temp = I915_READ(reg);
4207 temp &= ~FDI_RX_SYMBOL_LOCK;
4208 temp &= ~FDI_RX_BIT_LOCK;
4209 I915_WRITE(reg, temp);
4210
4211 POSTING_READ(reg);
4212 udelay(150);
4213
4214 /* enable CPU FDI TX and PCH FDI RX */
4215 reg = FDI_TX_CTL(pipe);
4216 temp = I915_READ(reg);
4217 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4218 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4219 temp &= ~FDI_LINK_TRAIN_NONE;
4220 temp |= FDI_LINK_TRAIN_PATTERN_1;
4221 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4222 /* SNB-B */
4223 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4224 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4225
4226 I915_WRITE(FDI_RX_MISC(pipe),
4227 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4228
4229 reg = FDI_RX_CTL(pipe);
4230 temp = I915_READ(reg);
4231 if (HAS_PCH_CPT(dev_priv)) {
4232 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4233 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4234 } else {
4235 temp &= ~FDI_LINK_TRAIN_NONE;
4236 temp |= FDI_LINK_TRAIN_PATTERN_1;
4237 }
4238 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4239
4240 POSTING_READ(reg);
4241 udelay(150);
4242
4243 for (i = 0; i < 4; i++) {
4244 reg = FDI_TX_CTL(pipe);
4245 temp = I915_READ(reg);
4246 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4247 temp |= snb_b_fdi_train_param[i];
4248 I915_WRITE(reg, temp);
4249
4250 POSTING_READ(reg);
4251 udelay(500);
4252
4253 for (retry = 0; retry < 5; retry++) {
4254 reg = FDI_RX_IIR(pipe);
4255 temp = I915_READ(reg);
4256 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4257 if (temp & FDI_RX_BIT_LOCK) {
4258 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4259 DRM_DEBUG_KMS("FDI train 1 done.\n");
4260 break;
4261 }
4262 udelay(50);
4263 }
4264 if (retry < 5)
4265 break;
4266 }
4267 if (i == 4)
4268 DRM_ERROR("FDI train 1 fail!\n");
4269
4270 /* Train 2 */
4271 reg = FDI_TX_CTL(pipe);
4272 temp = I915_READ(reg);
4273 temp &= ~FDI_LINK_TRAIN_NONE;
4274 temp |= FDI_LINK_TRAIN_PATTERN_2;
4275 if (IS_GEN(dev_priv, 6)) {
4276 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4277 /* SNB-B */
4278 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4279 }
4280 I915_WRITE(reg, temp);
4281
4282 reg = FDI_RX_CTL(pipe);
4283 temp = I915_READ(reg);
4284 if (HAS_PCH_CPT(dev_priv)) {
4285 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4286 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4287 } else {
4288 temp &= ~FDI_LINK_TRAIN_NONE;
4289 temp |= FDI_LINK_TRAIN_PATTERN_2;
4290 }
4291 I915_WRITE(reg, temp);
4292
4293 POSTING_READ(reg);
4294 udelay(150);
4295
4296 for (i = 0; i < 4; i++) {
4297 reg = FDI_TX_CTL(pipe);
4298 temp = I915_READ(reg);
4299 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4300 temp |= snb_b_fdi_train_param[i];
4301 I915_WRITE(reg, temp);
4302
4303 POSTING_READ(reg);
4304 udelay(500);
4305
4306 for (retry = 0; retry < 5; retry++) {
4307 reg = FDI_RX_IIR(pipe);
4308 temp = I915_READ(reg);
4309 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4310 if (temp & FDI_RX_SYMBOL_LOCK) {
4311 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4312 DRM_DEBUG_KMS("FDI train 2 done.\n");
4313 break;
4314 }
4315 udelay(50);
4316 }
4317 if (retry < 5)
4318 break;
4319 }
4320 if (i == 4)
4321 DRM_ERROR("FDI train 2 fail!\n");
4322
4323 DRM_DEBUG_KMS("FDI train done.\n");
4324 }
4325
4326 /* Manual link training for Ivy Bridge A0 parts */
4327 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4328 const struct intel_crtc_state *crtc_state)
4329 {
4330 struct drm_device *dev = crtc->base.dev;
4331 struct drm_i915_private *dev_priv = to_i915(dev);
4332 int pipe = crtc->pipe;
4333 i915_reg_t reg;
4334 u32 temp, i, j;
4335
4336 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4337 for the training result */
4338 reg = FDI_RX_IMR(pipe);
4339 temp = I915_READ(reg);
4340 temp &= ~FDI_RX_SYMBOL_LOCK;
4341 temp &= ~FDI_RX_BIT_LOCK;
4342 I915_WRITE(reg, temp);
4343
4344 POSTING_READ(reg);
4345 udelay(150);
4346
4347 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4348 I915_READ(FDI_RX_IIR(pipe)));
4349
4350 /* Try each vswing and preemphasis setting twice before moving on */
4351 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4352 /* disable first in case we need to retry */
4353 reg = FDI_TX_CTL(pipe);
4354 temp = I915_READ(reg);
4355 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4356 temp &= ~FDI_TX_ENABLE;
4357 I915_WRITE(reg, temp);
4358
4359 reg = FDI_RX_CTL(pipe);
4360 temp = I915_READ(reg);
4361 temp &= ~FDI_LINK_TRAIN_AUTO;
4362 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4363 temp &= ~FDI_RX_ENABLE;
4364 I915_WRITE(reg, temp);
4365
4366 /* enable CPU FDI TX and PCH FDI RX */
4367 reg = FDI_TX_CTL(pipe);
4368 temp = I915_READ(reg);
4369 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4370 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4371 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4372 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4373 temp |= snb_b_fdi_train_param[j/2];
4374 temp |= FDI_COMPOSITE_SYNC;
4375 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4376
4377 I915_WRITE(FDI_RX_MISC(pipe),
4378 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4379
4380 reg = FDI_RX_CTL(pipe);
4381 temp = I915_READ(reg);
4382 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4383 temp |= FDI_COMPOSITE_SYNC;
4384 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4385
4386 POSTING_READ(reg);
4387 udelay(1); /* should be 0.5us */
4388
4389 for (i = 0; i < 4; i++) {
4390 reg = FDI_RX_IIR(pipe);
4391 temp = I915_READ(reg);
4392 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4393
4394 if (temp & FDI_RX_BIT_LOCK ||
4395 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4396 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4397 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4398 i);
4399 break;
4400 }
4401 udelay(1); /* should be 0.5us */
4402 }
4403 if (i == 4) {
4404 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4405 continue;
4406 }
4407
4408 /* Train 2 */
4409 reg = FDI_TX_CTL(pipe);
4410 temp = I915_READ(reg);
4411 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4412 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4413 I915_WRITE(reg, temp);
4414
4415 reg = FDI_RX_CTL(pipe);
4416 temp = I915_READ(reg);
4417 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4418 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4419 I915_WRITE(reg, temp);
4420
4421 POSTING_READ(reg);
4422 udelay(2); /* should be 1.5us */
4423
4424 for (i = 0; i < 4; i++) {
4425 reg = FDI_RX_IIR(pipe);
4426 temp = I915_READ(reg);
4427 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4428
4429 if (temp & FDI_RX_SYMBOL_LOCK ||
4430 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4431 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4432 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4433 i);
4434 goto train_done;
4435 }
4436 udelay(2); /* should be 1.5us */
4437 }
4438 if (i == 4)
4439 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4440 }
4441
4442 train_done:
4443 DRM_DEBUG_KMS("FDI train done.\n");
4444 }
4445
4446 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4447 {
4448 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4449 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4450 int pipe = intel_crtc->pipe;
4451 i915_reg_t reg;
4452 u32 temp;
4453
4454 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4455 reg = FDI_RX_CTL(pipe);
4456 temp = I915_READ(reg);
4457 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4458 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4459 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4460 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4461
4462 POSTING_READ(reg);
4463 udelay(200);
4464
4465 /* Switch from Rawclk to PCDclk */
4466 temp = I915_READ(reg);
4467 I915_WRITE(reg, temp | FDI_PCDCLK);
4468
4469 POSTING_READ(reg);
4470 udelay(200);
4471
4472 /* Enable CPU FDI TX PLL, always on for Ironlake */
4473 reg = FDI_TX_CTL(pipe);
4474 temp = I915_READ(reg);
4475 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4476 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4477
4478 POSTING_READ(reg);
4479 udelay(100);
4480 }
4481 }
4482
4483 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4484 {
4485 struct drm_device *dev = intel_crtc->base.dev;
4486 struct drm_i915_private *dev_priv = to_i915(dev);
4487 int pipe = intel_crtc->pipe;
4488 i915_reg_t reg;
4489 u32 temp;
4490
4491 /* Switch from PCDclk to Rawclk */
4492 reg = FDI_RX_CTL(pipe);
4493 temp = I915_READ(reg);
4494 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4495
4496 /* Disable CPU FDI TX PLL */
4497 reg = FDI_TX_CTL(pipe);
4498 temp = I915_READ(reg);
4499 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4500
4501 POSTING_READ(reg);
4502 udelay(100);
4503
4504 reg = FDI_RX_CTL(pipe);
4505 temp = I915_READ(reg);
4506 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4507
4508 /* Wait for the clocks to turn off. */
4509 POSTING_READ(reg);
4510 udelay(100);
4511 }
4512
4513 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4514 {
4515 struct drm_device *dev = crtc->dev;
4516 struct drm_i915_private *dev_priv = to_i915(dev);
4517 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4518 int pipe = intel_crtc->pipe;
4519 i915_reg_t reg;
4520 u32 temp;
4521
4522 /* disable CPU FDI tx and PCH FDI rx */
4523 reg = FDI_TX_CTL(pipe);
4524 temp = I915_READ(reg);
4525 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4526 POSTING_READ(reg);
4527
4528 reg = FDI_RX_CTL(pipe);
4529 temp = I915_READ(reg);
4530 temp &= ~(0x7 << 16);
4531 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4532 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4533
4534 POSTING_READ(reg);
4535 udelay(100);
4536
4537 /* Ironlake workaround, disable clock pointer after downing FDI */
4538 if (HAS_PCH_IBX(dev_priv))
4539 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4540
4541 /* still set train pattern 1 */
4542 reg = FDI_TX_CTL(pipe);
4543 temp = I915_READ(reg);
4544 temp &= ~FDI_LINK_TRAIN_NONE;
4545 temp |= FDI_LINK_TRAIN_PATTERN_1;
4546 I915_WRITE(reg, temp);
4547
4548 reg = FDI_RX_CTL(pipe);
4549 temp = I915_READ(reg);
4550 if (HAS_PCH_CPT(dev_priv)) {
4551 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4552 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4553 } else {
4554 temp &= ~FDI_LINK_TRAIN_NONE;
4555 temp |= FDI_LINK_TRAIN_PATTERN_1;
4556 }
4557 /* BPC in FDI rx is consistent with that in PIPECONF */
4558 temp &= ~(0x07 << 16);
4559 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4560 I915_WRITE(reg, temp);
4561
4562 POSTING_READ(reg);
4563 udelay(100);
4564 }
4565
4566 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4567 {
4568 struct drm_crtc *crtc;
4569 bool cleanup_done;
4570
4571 drm_for_each_crtc(crtc, &dev_priv->drm) {
4572 struct drm_crtc_commit *commit;
4573 spin_lock(&crtc->commit_lock);
4574 commit = list_first_entry_or_null(&crtc->commit_list,
4575 struct drm_crtc_commit, commit_entry);
4576 cleanup_done = commit ?
4577 try_wait_for_completion(&commit->cleanup_done) : true;
4578 spin_unlock(&crtc->commit_lock);
4579
4580 if (cleanup_done)
4581 continue;
4582
4583 drm_crtc_wait_one_vblank(crtc);
4584
4585 return true;
4586 }
4587
4588 return false;
4589 }
4590
4591 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4592 {
4593 u32 temp;
4594
4595 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4596
4597 mutex_lock(&dev_priv->sb_lock);
4598
4599 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4600 temp |= SBI_SSCCTL_DISABLE;
4601 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4602
4603 mutex_unlock(&dev_priv->sb_lock);
4604 }
4605
4606 /* Program iCLKIP clock to the desired frequency */
4607 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4608 {
4609 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4610 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4611 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4612 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4613 u32 temp;
4614
4615 lpt_disable_iclkip(dev_priv);
4616
4617 /* The iCLK virtual clock root frequency is in MHz,
4618 * but the adjusted_mode->crtc_clock is in kHz. To get the
4619 * divisors, it is necessary to divide one by the other, so we
4620 * convert the virtual clock precision to kHz here for higher
4621 * precision.
4622 */
4623 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4624 u32 iclk_virtual_root_freq = 172800 * 1000;
4625 u32 iclk_pi_range = 64;
4626 u32 desired_divisor;
4627
4628 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4629 clock << auxdiv);
4630 divsel = (desired_divisor / iclk_pi_range) - 2;
4631 phaseinc = desired_divisor % iclk_pi_range;
4632
4633 /*
4634 * Near 20MHz is a corner case which is
4635 * out of range for the 7-bit divisor
4636 */
4637 if (divsel <= 0x7f)
4638 break;
4639 }
4640
4641 /* This should not happen with any sane values */
4642 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4643 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4644 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4645 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4646
4647 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4648 clock,
4649 auxdiv,
4650 divsel,
4651 phasedir,
4652 phaseinc);
4653
4654 mutex_lock(&dev_priv->sb_lock);
4655
4656 /* Program SSCDIVINTPHASE6 */
4657 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4658 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4659 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4660 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4661 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4662 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4663 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4664 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4665
4666 /* Program SSCAUXDIV */
4667 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4668 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4669 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4670 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4671
4672 /* Enable modulator and associated divider */
4673 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4674 temp &= ~SBI_SSCCTL_DISABLE;
4675 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4676
4677 mutex_unlock(&dev_priv->sb_lock);
4678
4679 /* Wait for initialization time */
4680 udelay(24);
4681
4682 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4683 }
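
/*
 * Worked example of the divisor search above (numbers illustrative):
 * for a 148500 kHz pixel clock and auxdiv = 0,
 *
 *	desired_divisor = DIV_ROUND_CLOSEST(172800000, 148500) = 1164
 *	divsel   = 1164 / 64 - 2 = 16
 *	phaseinc = 1164 % 64     = 12
 *
 * divsel fits in the 7-bit field, so the auxdiv = 0 iteration is kept.
 */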
4684
4685 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4686 {
4687 u32 divsel, phaseinc, auxdiv;
4688 u32 iclk_virtual_root_freq = 172800 * 1000;
4689 u32 iclk_pi_range = 64;
4690 u32 desired_divisor;
4691 u32 temp;
4692
4693 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
4694 return 0;
4695
4696 mutex_lock(&dev_priv->sb_lock);
4697
4698 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4699 if (temp & SBI_SSCCTL_DISABLE) {
4700 mutex_unlock(&dev_priv->sb_lock);
4701 return 0;
4702 }
4703
4704 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4705 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
4706 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
4707 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
4708 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
4709
4710 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4711 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
4712 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
4713
4714 mutex_unlock(&dev_priv->sb_lock);
4715
4716 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
4717
4718 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4719 desired_divisor << auxdiv);
4720 }
4721
4722 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
4723 enum pipe pch_transcoder)
4724 {
4725 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4726 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4727 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4728
4729 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4730 I915_READ(HTOTAL(cpu_transcoder)));
4731 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4732 I915_READ(HBLANK(cpu_transcoder)));
4733 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4734 I915_READ(HSYNC(cpu_transcoder)));
4735
4736 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4737 I915_READ(VTOTAL(cpu_transcoder)));
4738 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4739 I915_READ(VBLANK(cpu_transcoder)));
4740 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4741 I915_READ(VSYNC(cpu_transcoder)));
4742 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4743 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4744 }
4745
4746 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
4747 {
4748 u32 temp;
4749
4750 temp = I915_READ(SOUTH_CHICKEN1);
4751 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4752 return;
4753
4754 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4755 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4756
4757 temp &= ~FDI_BC_BIFURCATION_SELECT;
4758 if (enable)
4759 temp |= FDI_BC_BIFURCATION_SELECT;
4760
4761 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4762 I915_WRITE(SOUTH_CHICKEN1, temp);
4763 POSTING_READ(SOUTH_CHICKEN1);
4764 }
4765
4766 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4767 {
4768 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4769 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4770
4771 switch (crtc->pipe) {
4772 case PIPE_A:
4773 break;
4774 case PIPE_B:
4775 if (crtc_state->fdi_lanes > 2)
4776 cpt_set_fdi_bc_bifurcation(dev_priv, false);
4777 else
4778 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4779
4780 break;
4781 case PIPE_C:
4782 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4783
4784 break;
4785 default:
4786 BUG();
4787 }
4788 }
4789
4790 /*
4791 * Finds the encoder associated with the given CRTC. This can only be
4792 * used when we know that the CRTC isn't feeding multiple encoders!
4793 */
4794 static struct intel_encoder *
4795 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4796 const struct intel_crtc_state *crtc_state)
4797 {
4798 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4799 const struct drm_connector_state *connector_state;
4800 const struct drm_connector *connector;
4801 struct intel_encoder *encoder = NULL;
4802 int num_encoders = 0;
4803 int i;
4804
4805 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4806 if (connector_state->crtc != &crtc->base)
4807 continue;
4808
4809 encoder = to_intel_encoder(connector_state->best_encoder);
4810 num_encoders++;
4811 }
4812
4813 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4814 num_encoders, pipe_name(crtc->pipe));
4815
4816 return encoder;
4817 }
4818
4819 /*
4820 * Enable PCH resources required for PCH ports:
4821 * - PCH PLLs
4822 * - FDI training & RX/TX
4823 * - update transcoder timings
4824 * - DP transcoding bits
4825 * - transcoder
4826 */
4827 static void ironlake_pch_enable(const struct intel_atomic_state *state,
4828 const struct intel_crtc_state *crtc_state)
4829 {
4830 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4831 struct drm_device *dev = crtc->base.dev;
4832 struct drm_i915_private *dev_priv = to_i915(dev);
4833 int pipe = crtc->pipe;
4834 u32 temp;
4835
4836 assert_pch_transcoder_disabled(dev_priv, pipe);
4837
4838 if (IS_IVYBRIDGE(dev_priv))
4839 ivybridge_update_fdi_bc_bifurcation(crtc_state);
4840
4841 /* Write the TU size bits before fdi link training, so that error
4842 * detection works. */
4843 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4844 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4845
4846 /* For PCH output, training FDI link */
4847 dev_priv->display.fdi_link_train(crtc, crtc_state);
4848
4849 /* We need to program the right clock selection before writing the pixel
4850 * multiplier into the DPLL. */
4851 if (HAS_PCH_CPT(dev_priv)) {
4852 u32 sel;
4853
4854 temp = I915_READ(PCH_DPLL_SEL);
4855 temp |= TRANS_DPLL_ENABLE(pipe);
4856 sel = TRANS_DPLLB_SEL(pipe);
4857 if (crtc_state->shared_dpll ==
4858 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4859 temp |= sel;
4860 else
4861 temp &= ~sel;
4862 I915_WRITE(PCH_DPLL_SEL, temp);
4863 }
4864
4865 /* XXX: pch pll's can be enabled any time before we enable the PCH
4866 * transcoder, and we actually should do this to not upset any PCH
4867 * transcoder that already uses the clock when we share it.
4868 *
4869 * Note that enable_shared_dpll tries to do the right thing, but
4870 * get_shared_dpll unconditionally resets the pll - we need that to have
4871 * the right LVDS enable sequence. */
4872 intel_enable_shared_dpll(crtc_state);
4873
4874 /* set transcoder timing, panel must allow it */
4875 assert_panel_unlocked(dev_priv, pipe);
4876 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
4877
4878 intel_fdi_normal_train(crtc);
4879
4880 /* For PCH DP, enable TRANS_DP_CTL */
4881 if (HAS_PCH_CPT(dev_priv) &&
4882 intel_crtc_has_dp_encoder(crtc_state)) {
4883 const struct drm_display_mode *adjusted_mode =
4884 &crtc_state->base.adjusted_mode;
4885 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4886 i915_reg_t reg = TRANS_DP_CTL(pipe);
4887 enum port port;
4888
4889 temp = I915_READ(reg);
4890 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4891 TRANS_DP_SYNC_MASK |
4892 TRANS_DP_BPC_MASK);
4893 temp |= TRANS_DP_OUTPUT_ENABLE;
4894 temp |= bpc << 9; /* same format but at 11:9 */
4895
4896 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4897 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4898 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4899 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4900
4901 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
4902 WARN_ON(port < PORT_B || port > PORT_D);
4903 temp |= TRANS_DP_PORT_SEL(port);
4904
4905 I915_WRITE(reg, temp);
4906 }
4907
4908 ironlake_enable_pch_transcoder(crtc_state);
4909 }
4910
4911 static void lpt_pch_enable(const struct intel_atomic_state *state,
4912 const struct intel_crtc_state *crtc_state)
4913 {
4914 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4915 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4916 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4917
4918 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
4919
4920 lpt_program_iclkip(crtc_state);
4921
4922 /* Set transcoder timing. */
4923 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
4924
4925 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4926 }
4927
4928 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4929 {
4930 struct drm_i915_private *dev_priv = to_i915(dev);
4931 i915_reg_t dslreg = PIPEDSL(pipe);
4932 u32 temp;
4933
4934 temp = I915_READ(dslreg);
4935 udelay(500);
4936 if (wait_for(I915_READ(dslreg) != temp, 5)) {
4937 if (wait_for(I915_READ(dslreg) != temp, 5))
4938 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4939 }
4940 }
4941
4942 /*
4943 * The hardware phase 0.0 refers to the center of the pixel.
4944 * We want to start from the top/left edge which is phase
4945 * -0.5. That matches how the hardware calculates the scaling
4946 * factors (from top-left of the first pixel to bottom-right
4947 * of the last pixel, as opposed to the pixel centers).
4948 *
4949 * For 4:2:0 subsampled chroma planes we obviously have to
4950 * adjust that so that the chroma sample position lands in
4951 * the right spot.
4952 *
4953 * Note that for packed YCbCr 4:2:2 formats there is no way to
4954 * control chroma siting. The hardware simply replicates the
4955 * chroma samples for both of the luma samples, and thus we don't
4956 * actually get the expected MPEG2 chroma siting convention :(
4957 * The same behaviour is observed on pre-SKL platforms as well.
4958 *
4959 * Theory behind the formula (note that we ignore sub-pixel
4960 * source coordinates):
4961 * s = source sample position
4962 * d = destination sample position
4963 *
4964 * Downscaling 4:1:
4965 * -0.5
4966 * | 0.0
4967 * | | 1.5 (initial phase)
4968 * | | |
4969 * v v v
4970 * | s | s | s | s |
4971 * | d |
4972 *
4973 * Upscaling 1:4:
4974 * -0.5
4975 * | -0.375 (initial phase)
4976 * | | 0.0
4977 * | | |
4978 * v v v
4979 * | s |
4980 * | d | d | d | d |
4981 */
4982 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4983 {
4984 int phase = -0x8000;
4985 u16 trip = 0;
4986
4987 if (chroma_cosited)
4988 phase += (sub - 1) * 0x8000 / sub;
4989
4990 phase += scale / (2 * sub);
4991
4992 /*
4993 * Hardware initial phase limited to [-0.5:1.5].
4994 * Since the max hardware scale factor is 3.0, we
4995 * should never actually exceed 1.0 here.
4996 */
4997 WARN_ON(phase < -0x8000 || phase > 0x18000);
4998
4999 if (phase < 0)
5000 phase = 0x10000 + phase;
5001 else
5002 trip = PS_PHASE_TRIP;
5003
5004 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5005 }
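
/*
 * Worked example (illustrative): in the .16 fixed point used here,
 * -0x8000 is -0.5 and 0x10000 is 1.0. For a non-cosited luma plane
 * (sub = 1) at a 1:1 scale (scale = 0x10000):
 *
 *	phase = -0x8000 + 0x10000 / 2 = 0	(-0.5 + 0.5)
 *
 * phase >= 0, so trip becomes PS_PHASE_TRIP and the function returns
 * ((0 >> 2) & PS_PHASE_MASK) | PS_PHASE_TRIP.
 */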
5006
5007 static int
5008 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5009 unsigned int scaler_user, int *scaler_id,
5010 int src_w, int src_h, int dst_w, int dst_h,
5011 const struct drm_format_info *format, bool need_scaler)
5012 {
5013 struct intel_crtc_scaler_state *scaler_state =
5014 &crtc_state->scaler_state;
5015 struct intel_crtc *intel_crtc =
5016 to_intel_crtc(crtc_state->base.crtc);
5017 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5018 const struct drm_display_mode *adjusted_mode =
5019 &crtc_state->base.adjusted_mode;
5020
5021 /*
5022 * Src coordinates are already rotated by 270 degrees for
5023 * the 90/270 degree plane rotation cases (to match the
5024 * GTT mapping), hence no need to account for rotation here.
5025 */
5026 if (src_w != dst_w || src_h != dst_h)
5027 need_scaler = true;
5028
5029 /*
5030 * Scaling/fitting not supported in IF-ID mode in GEN9+
5031 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5032 * Once NV12 is enabled, handle it here while allocating scaler
5033 * for NV12.
5034 */
5035 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5036 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5037 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5038 return -EINVAL;
5039 }
5040
5041 /*
5042 * If the plane is being disabled, the scaler is no longer required, or
5043 * a detach is forced - free the scaler bound to this plane/crtc; to do
5044 * this, update crtc->scaler_usage.
5045 *
5046 * Here the scaler state in crtc_state is set free so that the
5047 * scaler can be assigned to another user. The actual register
5048 * update to free the scaler is done in plane/panel-fit programming.
5049 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5050 */
5051 if (force_detach || !need_scaler) {
5052 if (*scaler_id >= 0) {
5053 scaler_state->scaler_users &= ~(1 << scaler_user);
5054 scaler_state->scalers[*scaler_id].in_use = 0;
5055
5056 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5057 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5058 intel_crtc->pipe, scaler_user, *scaler_id,
5059 scaler_state->scaler_users);
5060 *scaler_id = -1;
5061 }
5062 return 0;
5063 }
5064
5065 if (format && is_planar_yuv_format(format->format) &&
5066 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5067 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5068 return -EINVAL;
5069 }
5070
5071 /* range checks */
5072 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5073 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5074 (INTEL_GEN(dev_priv) >= 11 &&
5075 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5076 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5077 (INTEL_GEN(dev_priv) < 11 &&
5078 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5079 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5080 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5081 "size is out of scaler range\n",
5082 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5083 return -EINVAL;
5084 }
5085
5086 /* mark this plane as a scaler user in crtc_state */
5087 scaler_state->scaler_users |= (1 << scaler_user);
5088 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5089 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5090 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5091 scaler_state->scaler_users);
5092
5093 return 0;
5094 }
5095
5096 /**
5097 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5098 *
5099 * @state: crtc's scaler state
5100 *
5101 * Return:
5102 * 0 - scaler_usage updated successfully
5103 * error - requested scaling cannot be supported or other error condition
5104 */
5105 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5106 {
5107 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5108 bool need_scaler = false;
5109
5110 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5111 need_scaler = true;
5112
5113 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5114 &state->scaler_state.scaler_id,
5115 state->pipe_src_w, state->pipe_src_h,
5116 adjusted_mode->crtc_hdisplay,
5117 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5118 }
5119
5120 /**
5121 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5122 * @crtc_state: crtc's scaler state
5123 * @plane_state: atomic plane state to update
5124 *
5125 * Return:
5126 * 0 - scaler_usage updated successfully
5127 * error - requested scaling cannot be supported or other error condition
5128 */
5129 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5130 struct intel_plane_state *plane_state)
5131 {
5132 struct intel_plane *intel_plane =
5133 to_intel_plane(plane_state->base.plane);
5134 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5135 struct drm_framebuffer *fb = plane_state->base.fb;
5136 int ret;
5137 bool force_detach = !fb || !plane_state->base.visible;
5138 bool need_scaler = false;
5139
5140 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5141 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5142 fb && is_planar_yuv_format(fb->format->format))
5143 need_scaler = true;
5144
5145 ret = skl_update_scaler(crtc_state, force_detach,
5146 drm_plane_index(&intel_plane->base),
5147 &plane_state->scaler_id,
5148 drm_rect_width(&plane_state->base.src) >> 16,
5149 drm_rect_height(&plane_state->base.src) >> 16,
5150 drm_rect_width(&plane_state->base.dst),
5151 drm_rect_height(&plane_state->base.dst),
5152 fb ? fb->format : NULL, need_scaler);
5153
5154 if (ret || plane_state->scaler_id < 0)
5155 return ret;
5156
5157 /* check colorkey */
5158 if (plane_state->ckey.flags) {
5159 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
5160 intel_plane->base.base.id,
5161 intel_plane->base.name);
5162 return -EINVAL;
5163 }
5164
5165 /* Check src format */
5166 switch (fb->format->format) {
5167 case DRM_FORMAT_RGB565:
5168 case DRM_FORMAT_XBGR8888:
5169 case DRM_FORMAT_XRGB8888:
5170 case DRM_FORMAT_ABGR8888:
5171 case DRM_FORMAT_ARGB8888:
5172 case DRM_FORMAT_XRGB2101010:
5173 case DRM_FORMAT_XBGR2101010:
5174 case DRM_FORMAT_XBGR16161616F:
5175 case DRM_FORMAT_ABGR16161616F:
5176 case DRM_FORMAT_XRGB16161616F:
5177 case DRM_FORMAT_ARGB16161616F:
5178 case DRM_FORMAT_YUYV:
5179 case DRM_FORMAT_YVYU:
5180 case DRM_FORMAT_UYVY:
5181 case DRM_FORMAT_VYUY:
5182 case DRM_FORMAT_NV12:
5183 case DRM_FORMAT_P010:
5184 case DRM_FORMAT_P012:
5185 case DRM_FORMAT_P016:
5186 case DRM_FORMAT_Y210:
5187 case DRM_FORMAT_Y212:
5188 case DRM_FORMAT_Y216:
5189 case DRM_FORMAT_XVYU2101010:
5190 case DRM_FORMAT_XVYU12_16161616:
5191 case DRM_FORMAT_XVYU16161616:
5192 break;
5193 default:
5194 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5195 intel_plane->base.base.id, intel_plane->base.name,
5196 fb->base.id, fb->format->format);
5197 return -EINVAL;
5198 }
5199
5200 return 0;
5201 }
5202
5203 static void skylake_scaler_disable(struct intel_crtc *crtc)
5204 {
5205 int i;
5206
5207 for (i = 0; i < crtc->num_scalers; i++)
5208 skl_detach_scaler(crtc, i);
5209 }
5210
5211 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5212 {
5213 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5214 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5215 enum pipe pipe = crtc->pipe;
5216 const struct intel_crtc_scaler_state *scaler_state =
5217 &crtc_state->scaler_state;
5218
5219 if (crtc_state->pch_pfit.enabled) {
5220 u16 uv_rgb_hphase, uv_rgb_vphase;
5221 int pfit_w, pfit_h, hscale, vscale;
5222 int id;
5223
5224 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5225 return;
5226
5227 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5228 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5229
5230 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5231 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5232
5233 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5234 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5235
5236 id = scaler_state->scaler_id;
5237 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5238 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5239 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5240 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5241 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5242 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5243 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5244 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5245 }
5246 }
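
/*
 * Scale factor format, worked example (illustrative): hscale/vscale
 * above are .16 fixed point ratios of source size to panel fitter
 * window size. Fitting a 1280 pixel wide source into a 1920 pixel
 * window gives
 *
 *	hscale = (1280 << 16) / 1920 = 0xaaaa	(~0.667)
 *
 * which skl_scaler_calc_phase() then turns into the initial phase.
 */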
5247
5248 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5249 {
5250 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5251 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5252 int pipe = crtc->pipe;
5253
5254 if (crtc_state->pch_pfit.enabled) {
5255 /* Force use of hard-coded filter coefficients
5256 * as some pre-programmed values are broken,
5257 * e.g. x201.
5258 */
5259 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5260 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5261 PF_PIPE_SEL_IVB(pipe));
5262 else
5263 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5264 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5265 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5266 }
5267 }
5268
5269 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5270 {
5271 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5272 struct drm_device *dev = crtc->base.dev;
5273 struct drm_i915_private *dev_priv = to_i915(dev);
5274
5275 if (!crtc_state->ips_enabled)
5276 return;
5277
5278 /*
5279 * We can only enable IPS after we enable a plane and wait for a vblank.
5280 * This function is called from post_plane_update, which is run after
5281 * a vblank wait.
5282 */
5283 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5284
5285 if (IS_BROADWELL(dev_priv)) {
5286 mutex_lock(&dev_priv->pcu_lock);
5287 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5288 IPS_ENABLE | IPS_PCODE_CONTROL));
5289 mutex_unlock(&dev_priv->pcu_lock);
5290 /* Quoting Art Runyan: "its not safe to expect any particular
5291 * value in IPS_CTL bit 31 after enabling IPS through the
5292 * mailbox." Moreover, the mailbox may return a bogus state,
5293 * so we need to just enable it and continue on.
5294 */
5295 } else {
5296 I915_WRITE(IPS_CTL, IPS_ENABLE);
5297 /* The bit only becomes 1 in the next vblank, so this wait here
5298 * is essentially intel_wait_for_vblank. If we don't have this
5299 * and don't wait for vblanks until the end of crtc_enable, then
5300 * the HW state readout code will complain that the expected
5301 * IPS_CTL value is not the one we read. */
5302 if (intel_wait_for_register(&dev_priv->uncore,
5303 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5304 50))
5305 DRM_ERROR("Timed out waiting for IPS enable\n");
5306 }
5307 }
5308
5309 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5310 {
5311 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5312 struct drm_device *dev = crtc->base.dev;
5313 struct drm_i915_private *dev_priv = to_i915(dev);
5314
5315 if (!crtc_state->ips_enabled)
5316 return;
5317
5318 if (IS_BROADWELL(dev_priv)) {
5319 mutex_lock(&dev_priv->pcu_lock);
5320 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5321 mutex_unlock(&dev_priv->pcu_lock);
5322 /*
5323 * Wait for PCODE to finish disabling IPS. The BSpec specified
5324 * 42ms timeout value leads to occasional timeouts so use 100ms
5325 * instead.
5326 */
5327 if (intel_wait_for_register(&dev_priv->uncore,
5328 IPS_CTL, IPS_ENABLE, 0,
5329 100))
5330 DRM_ERROR("Timed out waiting for IPS disable\n");
5331 } else {
5332 I915_WRITE(IPS_CTL, 0);
5333 POSTING_READ(IPS_CTL);
5334 }
5335
5336 /* We need to wait for a vblank before we can disable the plane. */
5337 intel_wait_for_vblank(dev_priv, crtc->pipe);
5338 }
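
/*
 * Ordering sketch (simplified): IPS interacts with plane and vblank
 * state, so the two functions above must be sequenced around plane
 * updates roughly as:
 *
 *	hsw_disable_ips(old_crtc_state);   // before touching planes
 *	... update planes, wait for vblank ...
 *	hsw_enable_ips(new_crtc_state);    // from post_plane_update
 *
 * mirroring the pre_plane_update/post_plane_update split in the atomic
 * commit code.
 */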
5339
5340 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5341 {
5342 if (intel_crtc->overlay) {
5343 struct drm_device *dev = intel_crtc->base.dev;
5344
5345 mutex_lock(&dev->struct_mutex);
5346 (void) intel_overlay_switch_off(intel_crtc->overlay);
5347 mutex_unlock(&dev->struct_mutex);
5348 }
5349
5350 /* Let userspace switch the overlay on again. In most cases userspace
5351 * has to recompute where to put it anyway.
5352 */
5353 }
5354
5355 /**
5356 * intel_post_enable_primary - Perform operations after enabling primary plane
5357 * @crtc: the CRTC whose primary plane was just enabled
5358 * @new_crtc_state: the enabling state
5359 *
5360 * Performs potentially sleeping operations that must be done after the primary
5361 * plane is enabled, such as updating FBC and IPS. Note that this may be
5362 * called due to an explicit primary plane update, or due to an implicit
5363 * re-enable that is caused when a sprite plane is updated to no longer
5364 * completely hide the primary plane.
5365 */
5366 static void
5367 intel_post_enable_primary(struct drm_crtc *crtc,
5368 const struct intel_crtc_state *new_crtc_state)
5369 {
5370 struct drm_device *dev = crtc->dev;
5371 struct drm_i915_private *dev_priv = to_i915(dev);
5372 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5373 int pipe = intel_crtc->pipe;
5374
5375 /*
5376 * Gen2 reports pipe underruns whenever all planes are disabled.
5377 * So don't enable underrun reporting before at least some planes
5378 * are enabled.
5379 * FIXME: Need to fix the logic to work when we turn off all planes
5380 * but leave the pipe running.
5381 */
5382 if (IS_GEN(dev_priv, 2))
5383 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5384
5385 /* Underruns don't always raise interrupts, so check manually. */
5386 intel_check_cpu_fifo_underruns(dev_priv);
5387 intel_check_pch_fifo_underruns(dev_priv);
5388 }
5389
5390 /* FIXME get rid of this and use pre_plane_update */
5391 static void
5392 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5393 {
5394 struct drm_device *dev = crtc->dev;
5395 struct drm_i915_private *dev_priv = to_i915(dev);
5396 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5397 int pipe = intel_crtc->pipe;
5398
5399 /*
5400 * Gen2 reports pipe underruns whenever all planes are disabled.
5401 * So disable underrun reporting before all the planes get disabled.
5402 */
5403 if (IS_GEN(dev_priv, 2))
5404 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5405
5406 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5407
5408 /*
5409 * Vblank time updates from the shadow to live plane control register
5410 * are blocked if the memory self-refresh mode is active at that
5411 * moment. So to make sure the plane gets truly disabled, disable
5412 * first the self-refresh mode. The self-refresh enable bit in turn
5413 * will be checked/applied by the HW only at the next frame start
5414 * event which is after the vblank start event, so we need to have a
5415 * wait-for-vblank between disabling the plane and the pipe.
5416 */
5417 if (HAS_GMCH(dev_priv) &&
5418 intel_set_memory_cxsr(dev_priv, false))
5419 intel_wait_for_vblank(dev_priv, pipe);
5420 }
5421
5422 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5423 const struct intel_crtc_state *new_crtc_state)
5424 {
5425 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5426 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5427
5428 if (!old_crtc_state->ips_enabled)
5429 return false;
5430
5431 if (needs_modeset(&new_crtc_state->base))
5432 return true;
5433
5434 /*
5435 * Workaround: Do not read or write the pipe palette/gamma data while
5436 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5437 *
5438 * Disable IPS before we program the LUT.
5439 */
5440 if (IS_HASWELL(dev_priv) &&
5441 (new_crtc_state->base.color_mgmt_changed ||
5442 new_crtc_state->update_pipe) &&
5443 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5444 return true;
5445
5446 return !new_crtc_state->ips_enabled;
5447 }
5448
5449 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5450 const struct intel_crtc_state *new_crtc_state)
5451 {
5452 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5453 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5454
5455 if (!new_crtc_state->ips_enabled)
5456 return false;
5457
5458 if (needs_modeset(&new_crtc_state->base))
5459 return true;
5460
5461 /*
5462 * Workaround: Do not read or write the pipe palette/gamma data while
5463 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5464 *
5465 * Re-enable IPS after the LUT has been programmed.
5466 */
5467 if (IS_HASWELL(dev_priv) &&
5468 (new_crtc_state->base.color_mgmt_changed ||
5469 new_crtc_state->update_pipe) &&
5470 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5471 return true;
5472
5473 /*
5474 * We can't read out IPS on broadwell, assume the worst and
5475 * forcibly enable IPS on the first fastset.
5476 */
5477 if (new_crtc_state->update_pipe &&
5478 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5479 return true;
5480
5481 return !old_crtc_state->ips_enabled;
5482 }
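/*
 * A minimal sketch of how the two predicates above are meant to bracket
 * LUT programming (their real call sites are intel_pre_plane_update()
 * and intel_post_plane_update() below):
 *
 *	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
 *		hsw_disable_ips(old_crtc_state);
 *	... program the pipe palette/gamma LUTs ...
 *	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
 *		hsw_enable_ips(new_crtc_state);
 */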
5483
5484 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5485 const struct intel_crtc_state *crtc_state)
5486 {
5487 if (!crtc_state->nv12_planes)
5488 return false;
5489
5490 /* WA Display #0827: Gen9:all */
5491 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5492 return true;
5493
5494 return false;
5495 }
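/*
 * Usage note: the WA is applied asymmetrically around a plane update, as
 * the two "Display WA 827" call sites below show. A sketch of the enable
 * half in intel_pre_plane_update():
 *
 *	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
 *	    needs_nv12_wa(dev_priv, new_crtc_state))
 *		skl_wa_clkgate(dev_priv, crtc->pipe, true);
 *
 * intel_post_plane_update() performs the mirror-image check to turn the
 * workaround back off once no NV12 planes remain.
 */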
5496
5497 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5498 {
5499 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5500 struct drm_device *dev = crtc->base.dev;
5501 struct drm_i915_private *dev_priv = to_i915(dev);
5502 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5503 struct intel_crtc_state *pipe_config =
5504 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5505 crtc);
5506 struct drm_plane *primary = crtc->base.primary;
5507 struct drm_plane_state *old_primary_state =
5508 drm_atomic_get_old_plane_state(old_state, primary);
5509
5510 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5511
5512 if (pipe_config->update_wm_post && pipe_config->base.active)
5513 intel_update_watermarks(crtc);
5514
5515 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5516 hsw_enable_ips(pipe_config);
5517
5518 if (old_primary_state) {
5519 struct drm_plane_state *new_primary_state =
5520 drm_atomic_get_new_plane_state(old_state, primary);
5521
5522 intel_fbc_post_update(crtc);
5523
5524 if (new_primary_state->visible &&
5525 (needs_modeset(&pipe_config->base) ||
5526 !old_primary_state->visible))
5527 intel_post_enable_primary(&crtc->base, pipe_config);
5528 }
5529
5530 /* Display WA 827 */
5531 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5532 !needs_nv12_wa(dev_priv, pipe_config)) {
5533 skl_wa_clkgate(dev_priv, crtc->pipe, false);
5534 }
5535 }
5536
5537 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5538 struct intel_crtc_state *pipe_config)
5539 {
5540 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5541 struct drm_device *dev = crtc->base.dev;
5542 struct drm_i915_private *dev_priv = to_i915(dev);
5543 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5544 struct drm_plane *primary = crtc->base.primary;
5545 struct drm_plane_state *old_primary_state =
5546 drm_atomic_get_old_plane_state(old_state, primary);
5547 bool modeset = needs_modeset(&pipe_config->base);
5548 struct intel_atomic_state *old_intel_state =
5549 to_intel_atomic_state(old_state);
5550
5551 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5552 hsw_disable_ips(old_crtc_state);
5553
5554 if (old_primary_state) {
5555 struct intel_plane_state *new_primary_state =
5556 intel_atomic_get_new_plane_state(old_intel_state,
5557 to_intel_plane(primary));
5558
5559 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5560 /*
5561 * Gen2 reports pipe underruns whenever all planes are disabled.
5562 * So disable underrun reporting before all the planes get disabled.
5563 */
5564 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5565 (modeset || !new_primary_state->base.visible))
5566 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5567 }
5568
5569 /* Display WA 827 */
5570 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5571 needs_nv12_wa(dev_priv, pipe_config)) {
5572 skl_wa_clkgate(dev_priv, crtc->pipe, true);
5573 }
5574
5575 /*
5576 * Vblank time updates from the shadow to live plane control register
5577 * are blocked if the memory self-refresh mode is active at that
5578 * moment. So to make sure the plane gets truly disabled, disable
5579 * first the self-refresh mode. The self-refresh enable bit in turn
5580 * will be checked/applied by the HW only at the next frame start
5581 * event which is after the vblank start event, so we need to have a
5582 * wait-for-vblank between disabling the plane and the pipe.
5583 */
5584 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5585 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5586 intel_wait_for_vblank(dev_priv, crtc->pipe);
5587
5588 /*
5589 * IVB workaround: must disable low power watermarks for at least
5590 * one frame before enabling scaling. LP watermarks can be re-enabled
5591 * when scaling is disabled.
5592 *
5593 * WaCxSRDisabledForSpriteScaling:ivb
5594 */
5595 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5596 old_crtc_state->base.active)
5597 intel_wait_for_vblank(dev_priv, crtc->pipe);
5598
5599 /*
5600 * If we're doing a modeset, we're done. No need to do any pre-vblank
5601 * watermark programming here.
5602 */
5603 if (needs_modeset(&pipe_config->base))
5604 return;
5605
5606 /*
5607 * For platforms that support atomic watermarks, program the
5608 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5609 * will be the intermediate values that are safe for both pre- and
5610 * post- vblank; when vblank happens, the 'active' values will be set
5611 * to the final 'target' values and we'll do this again to get the
5612 * optimal watermarks. For gen9+ platforms, the values we program here
5613 * will be the final target values which will get automatically latched
5614 * at vblank time; no further programming will be necessary.
5615 *
5616 * If a platform hasn't been transitioned to atomic watermarks yet,
5617 * we'll continue to update watermarks the old way, if flags tell
5618 * us to.
5619 */
5620 if (dev_priv->display.initial_watermarks != NULL)
5621 dev_priv->display.initial_watermarks(old_intel_state,
5622 pipe_config);
5623 else if (pipe_config->update_wm_pre)
5624 intel_update_watermarks(crtc);
5625 }
5626
5627 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5628 struct intel_crtc *crtc)
5629 {
5630 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5631 const struct intel_crtc_state *new_crtc_state =
5632 intel_atomic_get_new_crtc_state(state, crtc);
5633 unsigned int update_mask = new_crtc_state->update_planes;
5634 const struct intel_plane_state *old_plane_state;
5635 struct intel_plane *plane;
5636 unsigned fb_bits = 0;
5637 int i;
5638
5639 intel_crtc_dpms_overlay_disable(crtc);
5640
5641 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5642 if (crtc->pipe != plane->pipe ||
5643 !(update_mask & BIT(plane->id)))
5644 continue;
5645
5646 intel_disable_plane(plane, new_crtc_state);
5647
5648 if (old_plane_state->base.visible)
5649 fb_bits |= plane->frontbuffer_bit;
5650 }
5651
5652 intel_frontbuffer_flip(dev_priv, fb_bits);
5653 }
5654
5655 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5656 struct intel_crtc_state *crtc_state,
5657 struct drm_atomic_state *old_state)
5658 {
5659 struct drm_connector_state *conn_state;
5660 struct drm_connector *conn;
5661 int i;
5662
5663 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5664 struct intel_encoder *encoder =
5665 to_intel_encoder(conn_state->best_encoder);
5666
5667 if (conn_state->crtc != crtc)
5668 continue;
5669
5670 if (encoder->pre_pll_enable)
5671 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5672 }
5673 }
5674
5675 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5676 struct intel_crtc_state *crtc_state,
5677 struct drm_atomic_state *old_state)
5678 {
5679 struct drm_connector_state *conn_state;
5680 struct drm_connector *conn;
5681 int i;
5682
5683 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5684 struct intel_encoder *encoder =
5685 to_intel_encoder(conn_state->best_encoder);
5686
5687 if (conn_state->crtc != crtc)
5688 continue;
5689
5690 if (encoder->pre_enable)
5691 encoder->pre_enable(encoder, crtc_state, conn_state);
5692 }
5693 }
5694
5695 static void intel_encoders_enable(struct drm_crtc *crtc,
5696 struct intel_crtc_state *crtc_state,
5697 struct drm_atomic_state *old_state)
5698 {
5699 struct drm_connector_state *conn_state;
5700 struct drm_connector *conn;
5701 int i;
5702
5703 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5704 struct intel_encoder *encoder =
5705 to_intel_encoder(conn_state->best_encoder);
5706
5707 if (conn_state->crtc != crtc)
5708 continue;
5709
5710 if (encoder->enable)
5711 encoder->enable(encoder, crtc_state, conn_state);
5712 intel_opregion_notify_encoder(encoder, true);
5713 }
5714 }
5715
5716 static void intel_encoders_disable(struct drm_crtc *crtc,
5717 struct intel_crtc_state *old_crtc_state,
5718 struct drm_atomic_state *old_state)
5719 {
5720 struct drm_connector_state *old_conn_state;
5721 struct drm_connector *conn;
5722 int i;
5723
5724 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5725 struct intel_encoder *encoder =
5726 to_intel_encoder(old_conn_state->best_encoder);
5727
5728 if (old_conn_state->crtc != crtc)
5729 continue;
5730
5731 intel_opregion_notify_encoder(encoder, false);
5732 if (encoder->disable)
5733 encoder->disable(encoder, old_crtc_state, old_conn_state);
5734 }
5735 }
5736
5737 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5738 struct intel_crtc_state *old_crtc_state,
5739 struct drm_atomic_state *old_state)
5740 {
5741 struct drm_connector_state *old_conn_state;
5742 struct drm_connector *conn;
5743 int i;
5744
5745 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5746 struct intel_encoder *encoder =
5747 to_intel_encoder(old_conn_state->best_encoder);
5748
5749 if (old_conn_state->crtc != crtc)
5750 continue;
5751
5752 if (encoder->post_disable)
5753 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5754 }
5755 }
5756
5757 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5758 struct intel_crtc_state *old_crtc_state,
5759 struct drm_atomic_state *old_state)
5760 {
5761 struct drm_connector_state *old_conn_state;
5762 struct drm_connector *conn;
5763 int i;
5764
5765 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5766 struct intel_encoder *encoder =
5767 to_intel_encoder(old_conn_state->best_encoder);
5768
5769 if (old_conn_state->crtc != crtc)
5770 continue;
5771
5772 if (encoder->post_pll_disable)
5773 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5774 }
5775 }
5776
5777 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5778 struct intel_crtc_state *crtc_state,
5779 struct drm_atomic_state *old_state)
5780 {
5781 struct drm_connector_state *conn_state;
5782 struct drm_connector *conn;
5783 int i;
5784
5785 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5786 struct intel_encoder *encoder =
5787 to_intel_encoder(conn_state->best_encoder);
5788
5789 if (conn_state->crtc != crtc)
5790 continue;
5791
5792 if (encoder->update_pipe)
5793 encoder->update_pipe(encoder, crtc_state, conn_state);
5794 }
5795 }
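/*
 * Taken together, the iterators above fan one CRTC state change out to
 * every encoder on that CRTC. The ordering used by the crtc_enable hooks
 * below is:
 *
 *	pre_pll_enable -> (PLL on) -> pre_enable -> (pipe on) -> enable
 *
 * and the disable side mirrors it:
 *
 *	disable -> (pipe off) -> post_disable -> (PLL off) -> post_pll_disable
 */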
5796
5797 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
5798 {
5799 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5800 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5801
5802 plane->disable_plane(plane, crtc_state);
5803 }
5804
5805 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5806 struct drm_atomic_state *old_state)
5807 {
5808 struct drm_crtc *crtc = pipe_config->base.crtc;
5809 struct drm_device *dev = crtc->dev;
5810 struct drm_i915_private *dev_priv = to_i915(dev);
5811 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5812 int pipe = intel_crtc->pipe;
5813 struct intel_atomic_state *old_intel_state =
5814 to_intel_atomic_state(old_state);
5815
5816 if (WARN_ON(intel_crtc->active))
5817 return;
5818
5819 /*
5820 * Sometimes spurious CPU pipe underruns happen during FDI
5821 * training, at least with VGA+HDMI cloning. Suppress them.
5822 *
5823 * On ILK we get an occasional spurious CPU pipe underruns
5824 * between eDP port A enable and vdd enable. Also PCH port
5825 * enable seems to result in the occasional CPU pipe underrun.
5826 *
5827 * Spurious PCH underruns also occur during PCH enabling.
5828 */
5829 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5830 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5831
5832 if (pipe_config->has_pch_encoder)
5833 intel_prepare_shared_dpll(pipe_config);
5834
5835 if (intel_crtc_has_dp_encoder(pipe_config))
5836 intel_dp_set_m_n(pipe_config, M1_N1);
5837
5838 intel_set_pipe_timings(pipe_config);
5839 intel_set_pipe_src_size(pipe_config);
5840
5841 if (pipe_config->has_pch_encoder) {
5842 intel_cpu_transcoder_set_m_n(pipe_config,
5843 &pipe_config->fdi_m_n, NULL);
5844 }
5845
5846 ironlake_set_pipeconf(pipe_config);
5847
5848 intel_crtc->active = true;
5849
5850 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5851
5852 if (pipe_config->has_pch_encoder) {
5853 /* Note: FDI PLL enabling _must_ be done before we enable the
5854 * cpu pipes, hence this is separate from all the other fdi/pch
5855 * enabling. */
5856 ironlake_fdi_pll_enable(pipe_config);
5857 } else {
5858 assert_fdi_tx_disabled(dev_priv, pipe);
5859 assert_fdi_rx_disabled(dev_priv, pipe);
5860 }
5861
5862 ironlake_pfit_enable(pipe_config);
5863
5864 /*
5865 * On ILK+ LUT must be loaded before the pipe is running but with
5866 * clocks enabled
5867 */
5868 intel_color_load_luts(pipe_config);
5869 intel_color_commit(pipe_config);
5870 /* update DSPCNTR to configure gamma for pipe bottom color */
5871 intel_disable_primary_plane(pipe_config);
5872
5873 if (dev_priv->display.initial_watermarks != NULL)
5874 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5875 intel_enable_pipe(pipe_config);
5876
5877 if (pipe_config->has_pch_encoder)
5878 ironlake_pch_enable(old_intel_state, pipe_config);
5879
5880 assert_vblank_disabled(crtc);
5881 intel_crtc_vblank_on(pipe_config);
5882
5883 intel_encoders_enable(crtc, pipe_config, old_state);
5884
5885 if (HAS_PCH_CPT(dev_priv))
5886 cpt_verify_modeset(dev, intel_crtc->pipe);
5887
5888 /*
5889 * Must wait for vblank to avoid spurious PCH FIFO underruns.
5890 * And a second vblank wait is needed at least on ILK with
5891 * some interlaced HDMI modes. Let's do the double wait always
5892 * in case there are more corner cases we don't know about.
5893 */
5894 if (pipe_config->has_pch_encoder) {
5895 intel_wait_for_vblank(dev_priv, pipe);
5896 intel_wait_for_vblank(dev_priv, pipe);
5897 }
5898 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5899 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5900 }
5901
5902 /* IPS only exists on ULT machines and is tied to pipe A. */
5903 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5904 {
5905 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5906 }
5907
5908 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5909 enum pipe pipe, bool apply)
5910 {
5911 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5912 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5913
5914 if (apply)
5915 val |= mask;
5916 else
5917 val &= ~mask;
5918
5919 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5920 }
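/*
 * Usage sketch (see haswell_crtc_enable() below): the workaround is
 * applied before the pipe scaler is enabled and dropped only after a
 * full vblank has passed with the pipe running:
 *
 *	if (psl_clkgate_wa)
 *		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
 *	... enable pfit, pipe and encoders ...
 *	if (psl_clkgate_wa) {
 *		intel_wait_for_vblank(dev_priv, pipe);
 *		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
 *	}
 */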
5921
5922 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5923 {
5924 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5925 enum pipe pipe = crtc->pipe;
5926 u32 val;
5927
5928 val = MBUS_DBOX_A_CREDIT(2);
5929 val |= MBUS_DBOX_BW_CREDIT(1);
5930 val |= MBUS_DBOX_B_CREDIT(8);
5931
5932 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5933 }
5934
5935 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5936 struct drm_atomic_state *old_state)
5937 {
5938 struct drm_crtc *crtc = pipe_config->base.crtc;
5939 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5940 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5941 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
5942 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5943 struct intel_atomic_state *old_intel_state =
5944 to_intel_atomic_state(old_state);
5945 bool psl_clkgate_wa;
5946
5947 if (WARN_ON(intel_crtc->active))
5948 return;
5949
5950 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5951
5952 if (pipe_config->shared_dpll)
5953 intel_enable_shared_dpll(pipe_config);
5954
5955 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5956
5957 if (intel_crtc_has_dp_encoder(pipe_config))
5958 intel_dp_set_m_n(pipe_config, M1_N1);
5959
5960 if (!transcoder_is_dsi(cpu_transcoder))
5961 intel_set_pipe_timings(pipe_config);
5962
5963 intel_set_pipe_src_size(pipe_config);
5964
5965 if (cpu_transcoder != TRANSCODER_EDP &&
5966 !transcoder_is_dsi(cpu_transcoder)) {
5967 I915_WRITE(PIPE_MULT(cpu_transcoder),
5968 pipe_config->pixel_multiplier - 1);
5969 }
5970
5971 if (pipe_config->has_pch_encoder) {
5972 intel_cpu_transcoder_set_m_n(pipe_config,
5973 &pipe_config->fdi_m_n, NULL);
5974 }
5975
5976 if (!transcoder_is_dsi(cpu_transcoder))
5977 haswell_set_pipeconf(pipe_config);
5978
5979 haswell_set_pipemisc(pipe_config);
5980
5981 intel_crtc->active = true;
5982
5983 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5984 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
5985 pipe_config->pch_pfit.enabled;
5986 if (psl_clkgate_wa)
5987 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
5988
5989 if (INTEL_GEN(dev_priv) >= 9)
5990 skylake_pfit_enable(pipe_config);
5991 else
5992 ironlake_pfit_enable(pipe_config);
5993
5994 /*
5995 * On ILK+ LUT must be loaded before the pipe is running but with
5996 * clocks enabled
5997 */
5998 intel_color_load_luts(pipe_config);
5999 intel_color_commit(pipe_config);
6000 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6001 if (INTEL_GEN(dev_priv) < 9)
6002 intel_disable_primary_plane(pipe_config);
6003
6004 if (INTEL_GEN(dev_priv) >= 11)
6005 icl_set_pipe_chicken(intel_crtc);
6006
6007 intel_ddi_set_pipe_settings(pipe_config);
6008 if (!transcoder_is_dsi(cpu_transcoder))
6009 intel_ddi_enable_transcoder_func(pipe_config);
6010
6011 if (dev_priv->display.initial_watermarks != NULL)
6012 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6013
6014 if (INTEL_GEN(dev_priv) >= 11)
6015 icl_pipe_mbus_enable(intel_crtc);
6016
6017 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6018 if (!transcoder_is_dsi(cpu_transcoder))
6019 intel_enable_pipe(pipe_config);
6020
6021 if (pipe_config->has_pch_encoder)
6022 lpt_pch_enable(old_intel_state, pipe_config);
6023
6024 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6025 intel_ddi_set_vc_payload_alloc(pipe_config, true);
6026
6027 assert_vblank_disabled(crtc);
6028 intel_crtc_vblank_on(pipe_config);
6029
6030 intel_encoders_enable(crtc, pipe_config, old_state);
6031
6032 if (psl_clkgate_wa) {
6033 intel_wait_for_vblank(dev_priv, pipe);
6034 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6035 }
6036
6037 /* If we change the relative order between pipe/planes enabling, we need
6038 * to change the workaround. */
6039 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6040 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6041 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6042 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6043 }
6044 }
6045
6046 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6047 {
6048 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6049 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6050 enum pipe pipe = crtc->pipe;
6051
6052 /* To avoid upsetting the power well on haswell, only disable the pfit if
6053 * it's in use. The hw state code will make sure we get this right. */
6054 if (old_crtc_state->pch_pfit.enabled) {
6055 I915_WRITE(PF_CTL(pipe), 0);
6056 I915_WRITE(PF_WIN_POS(pipe), 0);
6057 I915_WRITE(PF_WIN_SZ(pipe), 0);
6058 }
6059 }
6060
6061 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6062 struct drm_atomic_state *old_state)
6063 {
6064 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6065 struct drm_device *dev = crtc->dev;
6066 struct drm_i915_private *dev_priv = to_i915(dev);
6067 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6068 int pipe = intel_crtc->pipe;
6069
6070 /*
6071 * Sometimes spurious CPU pipe underruns happen when the
6072 * pipe is already disabled, but FDI RX/TX is still enabled.
6073 * Happens at least with VGA+HDMI cloning. Suppress them.
6074 */
6075 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6076 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6077
6078 intel_encoders_disable(crtc, old_crtc_state, old_state);
6079
6080 drm_crtc_vblank_off(crtc);
6081 assert_vblank_disabled(crtc);
6082
6083 intel_disable_pipe(old_crtc_state);
6084
6085 ironlake_pfit_disable(old_crtc_state);
6086
6087 if (old_crtc_state->has_pch_encoder)
6088 ironlake_fdi_disable(crtc);
6089
6090 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6091
6092 if (old_crtc_state->has_pch_encoder) {
6093 ironlake_disable_pch_transcoder(dev_priv, pipe);
6094
6095 if (HAS_PCH_CPT(dev_priv)) {
6096 i915_reg_t reg;
6097 u32 temp;
6098
6099 /* disable TRANS_DP_CTL */
6100 reg = TRANS_DP_CTL(pipe);
6101 temp = I915_READ(reg);
6102 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6103 TRANS_DP_PORT_SEL_MASK);
6104 temp |= TRANS_DP_PORT_SEL_NONE;
6105 I915_WRITE(reg, temp);
6106
6107 /* disable DPLL_SEL */
6108 temp = I915_READ(PCH_DPLL_SEL);
6109 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6110 I915_WRITE(PCH_DPLL_SEL, temp);
6111 }
6112
6113 ironlake_fdi_pll_disable(intel_crtc);
6114 }
6115
6116 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6117 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6118 }
6119
6120 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6121 struct drm_atomic_state *old_state)
6122 {
6123 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6124 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6125 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6126 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6127
6128 intel_encoders_disable(crtc, old_crtc_state, old_state);
6129
6130 drm_crtc_vblank_off(crtc);
6131 assert_vblank_disabled(crtc);
6132
6133 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6134 if (!transcoder_is_dsi(cpu_transcoder))
6135 intel_disable_pipe(old_crtc_state);
6136
6137 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6138 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6139
6140 if (!transcoder_is_dsi(cpu_transcoder))
6141 intel_ddi_disable_transcoder_func(old_crtc_state);
6142
6143 intel_dsc_disable(old_crtc_state);
6144
6145 if (INTEL_GEN(dev_priv) >= 9)
6146 skylake_scaler_disable(intel_crtc);
6147 else
6148 ironlake_pfit_disable(old_crtc_state);
6149
6150 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6151
6152 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6153 }
6154
6155 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6156 {
6157 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6158 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6159
6160 if (!crtc_state->gmch_pfit.control)
6161 return;
6162
6163 /*
6164 * The panel fitter should only be adjusted whilst the pipe is disabled,
6165 * according to the register description and the PRM.
6166 */
6167 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6168 assert_pipe_disabled(dev_priv, crtc->pipe);
6169
6170 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6171 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6172
6173 /* Border color in case we don't scale up to the full screen. Black by
6174 * default, change to something else for debugging. */
6175 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6176 }
6177
6178 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6179 {
6180 if (port == PORT_NONE)
6181 return false;
6182
6183 if (IS_ELKHARTLAKE(dev_priv))
6184 return port <= PORT_C;
6185
6186 if (INTEL_GEN(dev_priv) >= 11)
6187 return port <= PORT_B;
6188
6189 return false;
6190 }
6191
6192 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6193 {
6194 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6195 return port >= PORT_C && port <= PORT_F;
6196
6197 return false;
6198 }
6199
6200 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6201 {
6202 if (!intel_port_is_tc(dev_priv, port))
6203 return PORT_TC_NONE;
6204
6205 return port - PORT_C;
6206 }
6207
6208 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6209 {
6210 switch (port) {
6211 case PORT_A:
6212 return POWER_DOMAIN_PORT_DDI_A_LANES;
6213 case PORT_B:
6214 return POWER_DOMAIN_PORT_DDI_B_LANES;
6215 case PORT_C:
6216 return POWER_DOMAIN_PORT_DDI_C_LANES;
6217 case PORT_D:
6218 return POWER_DOMAIN_PORT_DDI_D_LANES;
6219 case PORT_E:
6220 return POWER_DOMAIN_PORT_DDI_E_LANES;
6221 case PORT_F:
6222 return POWER_DOMAIN_PORT_DDI_F_LANES;
6223 default:
6224 MISSING_CASE(port);
6225 return POWER_DOMAIN_PORT_OTHER;
6226 }
6227 }
6228
6229 enum intel_display_power_domain
6230 intel_aux_power_domain(struct intel_digital_port *dig_port)
6231 {
6232 switch (dig_port->aux_ch) {
6233 case AUX_CH_A:
6234 return POWER_DOMAIN_AUX_A;
6235 case AUX_CH_B:
6236 return POWER_DOMAIN_AUX_B;
6237 case AUX_CH_C:
6238 return POWER_DOMAIN_AUX_C;
6239 case AUX_CH_D:
6240 return POWER_DOMAIN_AUX_D;
6241 case AUX_CH_E:
6242 return POWER_DOMAIN_AUX_E;
6243 case AUX_CH_F:
6244 return POWER_DOMAIN_AUX_F;
6245 default:
6246 MISSING_CASE(dig_port->aux_ch);
6247 return POWER_DOMAIN_AUX_A;
6248 }
6249 }
6250
6251 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6252 struct intel_crtc_state *crtc_state)
6253 {
6254 struct drm_device *dev = crtc->dev;
6255 struct drm_i915_private *dev_priv = to_i915(dev);
6256 struct drm_encoder *encoder;
6257 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6258 enum pipe pipe = intel_crtc->pipe;
6259 u64 mask;
6260 enum transcoder transcoder = crtc_state->cpu_transcoder;
6261
6262 if (!crtc_state->base.active)
6263 return 0;
6264
6265 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6266 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6267 if (crtc_state->pch_pfit.enabled ||
6268 crtc_state->pch_pfit.force_thru)
6269 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6270
6271 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6272 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6273
6274 mask |= BIT_ULL(intel_encoder->power_domain);
6275 }
6276
6277 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6278 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6279
6280 if (crtc_state->shared_dpll)
6281 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6282
6283 return mask;
6284 }
6285
6286 static u64
6287 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6288 struct intel_crtc_state *crtc_state)
6289 {
6290 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6291 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6292 enum intel_display_power_domain domain;
6293 u64 domains, new_domains, old_domains;
6294
6295 old_domains = intel_crtc->enabled_power_domains;
6296 intel_crtc->enabled_power_domains = new_domains =
6297 get_crtc_power_domains(crtc, crtc_state);
6298
6299 domains = new_domains & ~old_domains;
6300
6301 for_each_power_domain(domain, domains)
6302 intel_display_power_get(dev_priv, domain);
6303
6304 return old_domains & ~new_domains;
6305 }
6306
6307 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6308 u64 domains)
6309 {
6310 enum intel_display_power_domain domain;
6311
6312 for_each_power_domain(domain, domains)
6313 intel_display_power_put_unchecked(dev_priv, domain);
6314 }
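/*
 * A minimal usage sketch (hypothetical caller): acquire the domains the
 * new state needs before touching the hardware, then release whatever
 * only the old state needed once the update has been committed:
 *
 *	u64 put_domains = modeset_get_crtc_power_domains(crtc, new_crtc_state);
 *	... commit the new state ...
 *	modeset_put_power_domains(dev_priv, put_domains);
 */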
6315
6316 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6317 struct drm_atomic_state *old_state)
6318 {
6319 struct intel_atomic_state *old_intel_state =
6320 to_intel_atomic_state(old_state);
6321 struct drm_crtc *crtc = pipe_config->base.crtc;
6322 struct drm_device *dev = crtc->dev;
6323 struct drm_i915_private *dev_priv = to_i915(dev);
6324 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6325 int pipe = intel_crtc->pipe;
6326
6327 if (WARN_ON(intel_crtc->active))
6328 return;
6329
6330 if (intel_crtc_has_dp_encoder(pipe_config))
6331 intel_dp_set_m_n(pipe_config, M1_N1);
6332
6333 intel_set_pipe_timings(pipe_config);
6334 intel_set_pipe_src_size(pipe_config);
6335
6336 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6337 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6338 I915_WRITE(CHV_CANVAS(pipe), 0);
6339 }
6340
6341 i9xx_set_pipeconf(pipe_config);
6342
6343 intel_crtc->active = true;
6344
6345 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6346
6347 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6348
6349 if (IS_CHERRYVIEW(dev_priv)) {
6350 chv_prepare_pll(intel_crtc, pipe_config);
6351 chv_enable_pll(intel_crtc, pipe_config);
6352 } else {
6353 vlv_prepare_pll(intel_crtc, pipe_config);
6354 vlv_enable_pll(intel_crtc, pipe_config);
6355 }
6356
6357 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6358
6359 i9xx_pfit_enable(pipe_config);
6360
6361 intel_color_load_luts(pipe_config);
6362 intel_color_commit(pipe_config);
6363 /* update DSPCNTR to configure gamma for pipe bottom color */
6364 intel_disable_primary_plane(pipe_config);
6365
6366 dev_priv->display.initial_watermarks(old_intel_state,
6367 pipe_config);
6368 intel_enable_pipe(pipe_config);
6369
6370 assert_vblank_disabled(crtc);
6371 intel_crtc_vblank_on(pipe_config);
6372
6373 intel_encoders_enable(crtc, pipe_config, old_state);
6374 }
6375
6376 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6377 {
6378 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6379 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6380
6381 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6382 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6383 }
6384
6385 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6386 struct drm_atomic_state *old_state)
6387 {
6388 struct intel_atomic_state *old_intel_state =
6389 to_intel_atomic_state(old_state);
6390 struct drm_crtc *crtc = pipe_config->base.crtc;
6391 struct drm_device *dev = crtc->dev;
6392 struct drm_i915_private *dev_priv = to_i915(dev);
6393 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6394 enum pipe pipe = intel_crtc->pipe;
6395
6396 if (WARN_ON(intel_crtc->active))
6397 return;
6398
6399 i9xx_set_pll_dividers(pipe_config);
6400
6401 if (intel_crtc_has_dp_encoder(pipe_config))
6402 intel_dp_set_m_n(pipe_config, M1_N1);
6403
6404 intel_set_pipe_timings(pipe_config);
6405 intel_set_pipe_src_size(pipe_config);
6406
6407 i9xx_set_pipeconf(pipe_config);
6408
6409 intel_crtc->active = true;
6410
6411 if (!IS_GEN(dev_priv, 2))
6412 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6413
6414 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6415
6416 i9xx_enable_pll(intel_crtc, pipe_config);
6417
6418 i9xx_pfit_enable(pipe_config);
6419
6420 intel_color_load_luts(pipe_config);
6421 intel_color_commit(pipe_config);
6422 /* update DSPCNTR to configure gamma for pipe bottom color */
6423 intel_disable_primary_plane(pipe_config);
6424
6425 if (dev_priv->display.initial_watermarks != NULL)
6426 dev_priv->display.initial_watermarks(old_intel_state,
6427 pipe_config);
6428 else
6429 intel_update_watermarks(intel_crtc);
6430 intel_enable_pipe(pipe_config);
6431
6432 assert_vblank_disabled(crtc);
6433 intel_crtc_vblank_on(pipe_config);
6434
6435 intel_encoders_enable(crtc, pipe_config, old_state);
6436 }
6437
6438 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6439 {
6440 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6441 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6442
6443 if (!old_crtc_state->gmch_pfit.control)
6444 return;
6445
6446 assert_pipe_disabled(dev_priv, crtc->pipe);
6447
6448 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6449 I915_READ(PFIT_CONTROL));
6450 I915_WRITE(PFIT_CONTROL, 0);
6451 }
6452
6453 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6454 struct drm_atomic_state *old_state)
6455 {
6456 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6457 struct drm_device *dev = crtc->dev;
6458 struct drm_i915_private *dev_priv = to_i915(dev);
6459 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6460 int pipe = intel_crtc->pipe;
6461
6462 /*
6463 * On gen2 planes are double buffered but the pipe isn't, so we must
6464 * wait for planes to fully turn off before disabling the pipe.
6465 */
6466 if (IS_GEN(dev_priv, 2))
6467 intel_wait_for_vblank(dev_priv, pipe);
6468
6469 intel_encoders_disable(crtc, old_crtc_state, old_state);
6470
6471 drm_crtc_vblank_off(crtc);
6472 assert_vblank_disabled(crtc);
6473
6474 intel_disable_pipe(old_crtc_state);
6475
6476 i9xx_pfit_disable(old_crtc_state);
6477
6478 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6479
6480 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6481 if (IS_CHERRYVIEW(dev_priv))
6482 chv_disable_pll(dev_priv, pipe);
6483 else if (IS_VALLEYVIEW(dev_priv))
6484 vlv_disable_pll(dev_priv, pipe);
6485 else
6486 i9xx_disable_pll(old_crtc_state);
6487 }
6488
6489 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6490
6491 if (!IS_GEN(dev_priv, 2))
6492 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6493
6494 if (!dev_priv->display.initial_watermarks)
6495 intel_update_watermarks(intel_crtc);
6496
6497 /* clock the pipe down to 640x480@60 to potentially save power */
6498 if (IS_I830(dev_priv))
6499 i830_enable_pipe(dev_priv, pipe);
6500 }
6501
6502 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6503 struct drm_modeset_acquire_ctx *ctx)
6504 {
6505 struct intel_encoder *encoder;
6506 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6507 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6508 enum intel_display_power_domain domain;
6509 struct intel_plane *plane;
6510 u64 domains;
6511 struct drm_atomic_state *state;
6512 struct intel_crtc_state *crtc_state;
6513 int ret;
6514
6515 if (!intel_crtc->active)
6516 return;
6517
6518 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6519 const struct intel_plane_state *plane_state =
6520 to_intel_plane_state(plane->base.state);
6521
6522 if (plane_state->base.visible)
6523 intel_plane_disable_noatomic(intel_crtc, plane);
6524 }
6525
6526 state = drm_atomic_state_alloc(crtc->dev);
6527 if (!state) {
6528 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n",
6529 crtc->base.id, crtc->name);
6530 return;
6531 }
6532
6533 state->acquire_ctx = ctx;
6534
6535 /* Everything's already locked, -EDEADLK can't happen. */
6536 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6537 ret = drm_atomic_add_affected_connectors(state, crtc);
6538
6539 WARN_ON(IS_ERR(crtc_state) || ret);
6540
6541 dev_priv->display.crtc_disable(crtc_state, state);
6542
6543 drm_atomic_state_put(state);
6544
6545 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6546 crtc->base.id, crtc->name);
6547
6548 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6549 crtc->state->active = false;
6550 intel_crtc->active = false;
6551 crtc->enabled = false;
6552 crtc->state->connector_mask = 0;
6553 crtc->state->encoder_mask = 0;
6554
6555 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6556 encoder->base.crtc = NULL;
6557
6558 intel_fbc_disable(intel_crtc);
6559 intel_update_watermarks(intel_crtc);
6560 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6561
6562 domains = intel_crtc->enabled_power_domains;
6563 for_each_power_domain(domain, domains)
6564 intel_display_power_put_unchecked(dev_priv, domain);
6565 intel_crtc->enabled_power_domains = 0;
6566
6567 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6568 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6569 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6570 }
6571
6572 /*
6573 * Turn all CRTCs off, but do not adjust state.
6574 * This has to be paired with a call to intel_modeset_setup_hw_state.
6575 */
6576 int intel_display_suspend(struct drm_device *dev)
6577 {
6578 struct drm_i915_private *dev_priv = to_i915(dev);
6579 struct drm_atomic_state *state;
6580 int ret;
6581
6582 state = drm_atomic_helper_suspend(dev);
6583 ret = PTR_ERR_OR_ZERO(state);
6584 if (ret)
6585 DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
6586 else
6587 dev_priv->modeset_restore_state = state;
6588 return ret;
6589 }
6590
6591 void intel_encoder_destroy(struct drm_encoder *encoder)
6592 {
6593 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6594
6595 drm_encoder_cleanup(encoder);
6596 kfree(intel_encoder);
6597 }
6598
6599 /* Cross check the actual hw state with our own modeset state tracking (and its
6600 * internal consistency). */
6601 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6602 struct drm_connector_state *conn_state)
6603 {
6604 struct intel_connector *connector = to_intel_connector(conn_state->connector);
6605
6606 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6607 connector->base.base.id,
6608 connector->base.name);
6609
6610 if (connector->get_hw_state(connector)) {
6611 struct intel_encoder *encoder = connector->encoder;
6612
6613 I915_STATE_WARN(!crtc_state,
6614 "connector enabled without attached crtc\n");
6615
6616 if (!crtc_state)
6617 return;
6618
6619 I915_STATE_WARN(!crtc_state->active,
6620 "connector is active, but attached crtc isn't\n");
6621
6622 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6623 return;
6624
6625 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6626 "atomic encoder doesn't match attached encoder\n");
6627
6628 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6629 "attached encoder crtc differs from connector crtc\n");
6630 } else {
6631 I915_STATE_WARN(crtc_state && crtc_state->active,
6632 "attached crtc is active, but connector isn't\n");
6633 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
6634 "best encoder set without crtc!\n");
6635 }
6636 }
6637
6638 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6639 {
6640 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6641 return crtc_state->fdi_lanes;
6642
6643 return 0;
6644 }
6645
6646 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6647 struct intel_crtc_state *pipe_config)
6648 {
6649 struct drm_i915_private *dev_priv = to_i915(dev);
6650 struct drm_atomic_state *state = pipe_config->base.state;
6651 struct intel_crtc *other_crtc;
6652 struct intel_crtc_state *other_crtc_state;
6653
6654 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6655 pipe_name(pipe), pipe_config->fdi_lanes);
6656 if (pipe_config->fdi_lanes > 4) {
6657 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6658 pipe_name(pipe), pipe_config->fdi_lanes);
6659 return -EINVAL;
6660 }
6661
6662 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
6663 if (pipe_config->fdi_lanes > 2) {
6664 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6665 pipe_config->fdi_lanes);
6666 return -EINVAL;
6667 } else {
6668 return 0;
6669 }
6670 }
6671
6672 if (INTEL_INFO(dev_priv)->num_pipes == 2)
6673 return 0;
6674
6675 /* Ivybridge 3 pipe is really complicated */
6676 switch (pipe) {
6677 case PIPE_A:
6678 return 0;
6679 case PIPE_B:
6680 if (pipe_config->fdi_lanes <= 2)
6681 return 0;
6682
6683 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
6684 other_crtc_state =
6685 intel_atomic_get_crtc_state(state, other_crtc);
6686 if (IS_ERR(other_crtc_state))
6687 return PTR_ERR(other_crtc_state);
6688
6689 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6690 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6691 pipe_name(pipe), pipe_config->fdi_lanes);
6692 return -EINVAL;
6693 }
6694 return 0;
6695 case PIPE_C:
6696 if (pipe_config->fdi_lanes > 2) {
6697 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6698 pipe_name(pipe), pipe_config->fdi_lanes);
6699 return -EINVAL;
6700 }
6701
6702 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
6703 other_crtc_state =
6704 intel_atomic_get_crtc_state(state, other_crtc);
6705 if (IS_ERR(other_crtc_state))
6706 return PTR_ERR(other_crtc_state);
6707
6708 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6709 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6710 return -EINVAL;
6711 }
6712 return 0;
6713 default:
6714 BUG();
6715 }
6716 }
6717
6718 #define RETRY 1
6719 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6720 struct intel_crtc_state *pipe_config)
6721 {
6722 struct drm_device *dev = intel_crtc->base.dev;
6723 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6724 int lane, link_bw, fdi_dotclock, ret;
6725 bool needs_recompute = false;
6726
6727 retry:
6728 /* FDI is a binary signal running at ~2.7GHz, encoding
6729 * each output octet as 10 bits. The actual frequency
6730 * is stored as a divider into a 100MHz clock, and the
6731 * mode pixel clock is stored in units of 1 kHz.
6732 * Hence the bw of each lane in terms of the mode signal
6733 * is given by intel_fdi_link_freq() below.
6734 */
6735 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6736
6737 fdi_dotclock = adjusted_mode->crtc_clock;
6738
6739 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6740 pipe_config->pipe_bpp);
6741
6742 pipe_config->fdi_lanes = lane;
6743
6744 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6745 link_bw, &pipe_config->fdi_m_n, false);
6746
6747 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6748 if (ret == -EDEADLK)
6749 return ret;
6750
6751 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6752 pipe_config->pipe_bpp -= 2*3;
6753 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6754 pipe_config->pipe_bpp);
6755 needs_recompute = true;
6756 pipe_config->bw_constrained = true;
6757
6758 goto retry;
6759 }
6760
6761 if (needs_recompute)
6762 return RETRY;
6763
6764 return ret;
6765 }
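/*
 * Worked example with illustrative numbers: a 1920x1080@60 mode has a
 * dot clock of ~148500 kHz. At 24 bpp over a 270000 kHz FDI link each
 * lane carries 270000 * 8 = 2160000 kbps, so the mode needs
 * 148500 * 24 / 2160000 = 1.65 lanes before rounding; the lane-count
 * helper additionally reserves some spread-spectrum headroom, and
 * either way this rounds up to fdi_lanes = 2.
 */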
6766
6767 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6768 {
6769 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6771
6772 /* IPS only exists on ULT machines and is tied to pipe A. */
6773 if (!hsw_crtc_supports_ips(crtc))
6774 return false;
6775
6776 if (!i915_modparams.enable_ips)
6777 return false;
6778
6779 if (crtc_state->pipe_bpp > 24)
6780 return false;
6781
6782 /*
6783 * We compare against max which means we must take
6784 * the increased cdclk requirement into account when
6785 * calculating the new cdclk.
6786 *
6787 * Should measure whether using a lower cdclk w/o IPS
6788 */
6789 if (IS_BROADWELL(dev_priv) &&
6790 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6791 return false;
6792
6793 return true;
6794 }
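/*
 * Worked example with illustrative numbers: on a BDW part with
 * max_cdclk_freq = 540000 kHz, the 95% rule above caps the pixel rate
 * at 540000 * 95 / 100 = 513000 kHz; any mode with a higher pixel rate
 * is not IPS capable.
 */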
6795
6796 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6797 {
6798 struct drm_i915_private *dev_priv =
6799 to_i915(crtc_state->base.crtc->dev);
6800 struct intel_atomic_state *intel_state =
6801 to_intel_atomic_state(crtc_state->base.state);
6802
6803 if (!hsw_crtc_state_ips_capable(crtc_state))
6804 return false;
6805
6806 /*
6807 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
6808 * enabled and disabled dynamically based on package C states,
6809 * user space can't make reliable use of the CRCs, so let's just
6810 * completely disable it.
6811 */
6812 if (crtc_state->crc_enabled)
6813 return false;
6814
6815 /* IPS should be fine as long as at least one plane is enabled. */
6816 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6817 return false;
6818
6819 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6820 if (IS_BROADWELL(dev_priv) &&
6821 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6822 return false;
6823
6824 return true;
6825 }
6826
6827 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6828 {
6829 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6830
6831 /* GDG double wide on either pipe, otherwise pipe A only */
6832 return INTEL_GEN(dev_priv) < 4 &&
6833 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6834 }
6835
6836 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6837 {
6838 u32 pixel_rate;
6839
6840 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6841
6842 /*
6843 * We only use IF-ID interlacing. If we ever use
6844 * PF-ID we'll need to adjust the pixel_rate here.
6845 */
6846
6847 if (pipe_config->pch_pfit.enabled) {
6848 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6849 u32 pfit_size = pipe_config->pch_pfit.size;
6850
6851 pipe_w = pipe_config->pipe_src_w;
6852 pipe_h = pipe_config->pipe_src_h;
6853
6854 pfit_w = (pfit_size >> 16) & 0xFFFF;
6855 pfit_h = pfit_size & 0xFFFF;
6856 if (pipe_w < pfit_w)
6857 pipe_w = pfit_w;
6858 if (pipe_h < pfit_h)
6859 pipe_h = pfit_h;
6860
6861 if (WARN_ON(!pfit_w || !pfit_h))
6862 return pixel_rate;
6863
6864 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
6865 pfit_w * pfit_h);
6866 }
6867
6868 return pixel_rate;
6869 }
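/*
 * Worked example with illustrative numbers: downscaling a 3840x2160
 * pipe source through the PCH panel fitter into a 1920x1080 window
 * multiplies the effective pixel rate by
 * (3840 * 2160) / (1920 * 1080) = 4. When the fitter upscales instead,
 * pipe_w/pipe_h are clamped to the fitter size above, so the ratio is 1
 * and the pixel rate is unchanged.
 */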
6870
6871 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6872 {
6873 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6874
6875 if (HAS_GMCH(dev_priv))
6876 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6877 crtc_state->pixel_rate =
6878 crtc_state->base.adjusted_mode.crtc_clock;
6879 else
6880 crtc_state->pixel_rate =
6881 ilk_pipe_pixel_rate(crtc_state);
6882 }
6883
6884 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6885 struct intel_crtc_state *pipe_config)
6886 {
6887 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6888 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6889 int clock_limit = dev_priv->max_dotclk_freq;
6890
6891 if (INTEL_GEN(dev_priv) < 4) {
6892 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6893
6894 /*
6895 * Enable double wide mode when the dot clock
6896 * is > 90% of the (display) core speed.
6897 */
6898 if (intel_crtc_supports_double_wide(crtc) &&
6899 adjusted_mode->crtc_clock > clock_limit) {
6900 clock_limit = dev_priv->max_dotclk_freq;
6901 pipe_config->double_wide = true;
6902 }
6903 }
6904
6905 if (adjusted_mode->crtc_clock > clock_limit) {
6906 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6907 adjusted_mode->crtc_clock, clock_limit,
6908 yesno(pipe_config->double_wide));
6909 return -EINVAL;
6910 }
6911
6912 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6913 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
6914 pipe_config->base.ctm) {
6915 /*
6916 * There is only one pipe CSC unit per pipe, and we need that
6917 * for output conversion from RGB->YCBCR. So if CTM is already
6918 * applied we can't support YCBCR420 output.
6919 */
6920 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
6921 return -EINVAL;
6922 }
6923
6924 /*
6925 * Pipe horizontal size must be even in:
6926 * - DVO ganged mode
6927 * - LVDS dual channel mode
6928 * - Double wide pipe
6929 */
6930 if (pipe_config->pipe_src_w & 1) {
6931 if (pipe_config->double_wide) {
6932 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
6933 return -EINVAL;
6934 }
6935
6936 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6937 intel_is_dual_link_lvds(dev_priv)) {
6938 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
6939 return -EINVAL;
6940 }
6941 }
6942
6943 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6944 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6945 */
6946 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
6947 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6948 return -EINVAL;
6949
6950 intel_crtc_compute_pixel_rate(pipe_config);
6951
6952 if (pipe_config->has_pch_encoder)
6953 return ironlake_fdi_compute_config(crtc, pipe_config);
6954
6955 return 0;
6956 }
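/*
 * Worked example for the double wide check above, with illustrative
 * numbers: with max_cdclk_freq = 320000 kHz, a gen < 4 pipe is limited
 * to 320000 * 9 / 10 = 288000 kHz in single wide mode. A 300000 kHz
 * mode therefore only passes on hardware that supports double wide,
 * where the limit is raised to max_dotclk_freq; otherwise the compute
 * phase fails with -EINVAL.
 */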
6957
6958 static void
6959 intel_reduce_m_n_ratio(u32 *num, u32 *den)
6960 {
6961 while (*num > DATA_LINK_M_N_MASK ||
6962 *den > DATA_LINK_M_N_MASK) {
6963 *num >>= 1;
6964 *den >>= 1;
6965 }
6966 }
6967
6968 static void compute_m_n(unsigned int m, unsigned int n,
6969 u32 *ret_m, u32 *ret_n,
6970 bool constant_n)
6971 {
6972 /*
6973 * Several DP dongles in particular seem to be fussy about
6974 * too large link M/N values. Use an N value of 0x8000, which
6975 * should be acceptable to such devices. 0x8000 is the
6976 * specified fixed N value for asynchronous clock mode,
6977 * which the devices also expect in synchronous clock mode.
6978 */
6979 if (constant_n)
6980 *ret_n = 0x8000;
6981 else
6982 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6983
6984 *ret_m = div_u64((u64)m * *ret_n, n);
6985 intel_reduce_m_n_ratio(ret_m, ret_n);
6986 }
6987
6988 void
6989 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
6990 int pixel_clock, int link_clock,
6991 struct intel_link_m_n *m_n,
6992 bool constant_n)
6993 {
6994 m_n->tu = 64;
6995
6996 compute_m_n(bits_per_pixel * pixel_clock,
6997 link_clock * nlanes * 8,
6998 &m_n->gmch_m, &m_n->gmch_n,
6999 constant_n);
7000
7001 compute_m_n(pixel_clock, link_clock,
7002 &m_n->link_m, &m_n->link_n,
7003 constant_n);
7004 }
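/*
 * Worked example with illustrative numbers (constant_n = true): for a
 * 148500 kHz pixel clock at 24 bpp over 4 lanes of a 270000 kHz link:
 *
 *	data:  m = 24 * 148500 = 3564000, n = 270000 * 4 * 8 = 8640000
 *	       gmch_n = 0x8000 = 32768
 *	       gmch_m = 3564000 * 32768 / 8640000 = 13516
 *	link:  link_n = 32768
 *	       link_m = 148500 * 32768 / 270000 = 18022
 *
 * Both ratios already fit within DATA_LINK_M_N_MASK, so
 * intel_reduce_m_n_ratio() leaves them untouched.
 */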
7005
7006 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7007 {
7008 if (i915_modparams.panel_use_ssc >= 0)
7009 return i915_modparams.panel_use_ssc != 0;
7010 return dev_priv->vbt.lvds_use_ssc
7011 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7012 }
7013
7014 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7015 {
7016 return (1 << dpll->n) << 16 | dpll->m2;
7017 }
7018
7019 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7020 {
7021 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7022 }
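/*
 * Packing example with illustrative divider values: n = 4, m1 = 14,
 * m2 = 6 gives i9xx_dpll_compute_fp() = 4 << 16 | 14 << 8 | 6
 * = 0x040e06, while the Pineview variant packs (1 << n) << 16 | m2,
 * so the same n and m2 give 0x100006.
 */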
7023
7024 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7025 struct intel_crtc_state *crtc_state,
7026 struct dpll *reduced_clock)
7027 {
7028 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7029 u32 fp, fp2 = 0;
7030
7031 if (IS_PINEVIEW(dev_priv)) {
7032 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7033 if (reduced_clock)
7034 fp2 = pnv_dpll_compute_fp(reduced_clock);
7035 } else {
7036 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7037 if (reduced_clock)
7038 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7039 }
7040
7041 crtc_state->dpll_hw_state.fp0 = fp;
7042
7043 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7044 reduced_clock) {
7045 crtc_state->dpll_hw_state.fp1 = fp2;
7046 } else {
7047 crtc_state->dpll_hw_state.fp1 = fp;
7048 }
7049 }
7050
7051 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7052 pipe)
7053 {
7054 u32 reg_val;
7055
7056 /*
7057 * PLLB opamp always calibrates to the max value of 0x3f; force-enable it
7058 * and set it to a reasonable value instead.
7059 */
7060 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7061 reg_val &= 0xffffff00;
7062 reg_val |= 0x00000030;
7063 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7064
7065 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7066 reg_val &= 0x00ffffff;
7067 reg_val |= 0x8c000000;
7068 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7069
7070 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7071 reg_val &= 0xffffff00;
7072 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7073
7074 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7075 reg_val &= 0x00ffffff;
7076 reg_val |= 0xb0000000;
7077 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7078 }
7079
7080 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7081 const struct intel_link_m_n *m_n)
7082 {
7083 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7084 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7085 enum pipe pipe = crtc->pipe;
7086
7087 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7088 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7089 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7090 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7091 }
7092
7093 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7094 enum transcoder transcoder)
7095 {
7096 if (IS_HASWELL(dev_priv))
7097 return transcoder == TRANSCODER_EDP;
7098
7099 /*
7100 * Strictly speaking, some registers are available before
7101 * gen7, but we only support DRRS on gen7+
7102 */
7103 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7104 }
7105
7106 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7107 const struct intel_link_m_n *m_n,
7108 const struct intel_link_m_n *m2_n2)
7109 {
7110 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7111 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7112 enum pipe pipe = crtc->pipe;
7113 enum transcoder transcoder = crtc_state->cpu_transcoder;
7114
7115 if (INTEL_GEN(dev_priv) >= 5) {
7116 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7117 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7118 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7119 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7120 /*
7121 * M2_N2 registers are set only if DRRS is supported
7122 * (to make sure the registers are not unnecessarily accessed).
7123 */
7124 if (m2_n2 && crtc_state->has_drrs &&
7125 transcoder_has_m2_n2(dev_priv, transcoder)) {
7126 I915_WRITE(PIPE_DATA_M2(transcoder),
7127 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7128 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7129 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7130 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7131 }
7132 } else {
7133 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7134 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7135 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7136 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7137 }
7138 }
7139
7140 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7141 {
7142 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7143
7144 if (m_n == M1_N1) {
7145 dp_m_n = &crtc_state->dp_m_n;
7146 dp_m2_n2 = &crtc_state->dp_m2_n2;
7147 } else if (m_n == M2_N2) {
7148
7149 /*
7150 * M2_N2 registers are not supported here, so the m2_n2 divider
7151 * values must be programmed into M1_N1 instead.
7152 */
7153 dp_m_n = &crtc_state->dp_m2_n2;
7154 } else {
7155 DRM_ERROR("Unsupported divider value\n");
7156 return;
7157 }
7158
7159 if (crtc_state->has_pch_encoder)
7160 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7161 else
7162 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7163 }
7164
7165 static void vlv_compute_dpll(struct intel_crtc *crtc,
7166 struct intel_crtc_state *pipe_config)
7167 {
7168 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7169 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7170 if (crtc->pipe != PIPE_A)
7171 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7172
7173 /* DPLL not used with DSI, but still need the rest set up */
7174 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7175 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7176 DPLL_EXT_BUFFER_ENABLE_VLV;
7177
7178 pipe_config->dpll_hw_state.dpll_md =
7179 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7180 }
7181
7182 static void chv_compute_dpll(struct intel_crtc *crtc,
7183 struct intel_crtc_state *pipe_config)
7184 {
7185 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7186 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7187 if (crtc->pipe != PIPE_A)
7188 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7189
7190 /* DPLL not used with DSI, but still need the rest set up */
7191 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7192 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7193
7194 pipe_config->dpll_hw_state.dpll_md =
7195 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7196 }
7197
7198 static void vlv_prepare_pll(struct intel_crtc *crtc,
7199 const struct intel_crtc_state *pipe_config)
7200 {
7201 struct drm_device *dev = crtc->base.dev;
7202 struct drm_i915_private *dev_priv = to_i915(dev);
7203 enum pipe pipe = crtc->pipe;
7204 u32 mdiv;
7205 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7206 u32 coreclk, reg_val;
7207
7208 /* Enable Refclk */
7209 I915_WRITE(DPLL(pipe),
7210 pipe_config->dpll_hw_state.dpll &
7211 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7212
7213 /* No need to actually set up the DPLL with DSI */
7214 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7215 return;
7216
7217 mutex_lock(&dev_priv->sb_lock);
7218
7219 bestn = pipe_config->dpll.n;
7220 bestm1 = pipe_config->dpll.m1;
7221 bestm2 = pipe_config->dpll.m2;
7222 bestp1 = pipe_config->dpll.p1;
7223 bestp2 = pipe_config->dpll.p2;
7224
7225 /* See eDP HDMI DPIO driver vbios notes doc */
7226
7227 /* PLL B needs special handling */
7228 if (pipe == PIPE_B)
7229 vlv_pllb_recal_opamp(dev_priv, pipe);
7230
7231 /* Set up Tx target for periodic Rcomp update */
7232 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7233
7234 /* Disable target IRef on PLL */
7235 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7236 reg_val &= 0x00ffffff;
7237 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7238
7239 /* Disable fast lock */
7240 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7241
7242 /* Set idtafcrecal before PLL is enabled */
7243 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7244 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7245 mdiv |= ((bestn << DPIO_N_SHIFT));
7246 mdiv |= (1 << DPIO_K_SHIFT);
7247
7248 /*
7249 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7250 * but we don't support that).
7251 * Note: don't use the DAC post divider as it seems unstable.
7252 */
7253 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7254 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7255
7256 mdiv |= DPIO_ENABLE_CALIBRATION;
7257 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7258
7259 /* Set HBR and RBR LPF coefficients */
7260 if (pipe_config->port_clock == 162000 ||
7261 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7262 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7263 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7264 0x009f0003);
7265 else
7266 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7267 0x00d0000f);
7268
7269 if (intel_crtc_has_dp_encoder(pipe_config)) {
7270 /* Use SSC source */
7271 if (pipe == PIPE_A)
7272 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7273 0x0df40000);
7274 else
7275 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7276 0x0df70000);
7277 } else { /* HDMI or VGA */
7278 /* Use bend source */
7279 if (pipe == PIPE_A)
7280 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7281 0x0df70000);
7282 else
7283 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7284 0x0df40000);
7285 }
7286
7287 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7288 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7289 if (intel_crtc_has_dp_encoder(pipe_config))
7290 coreclk |= 0x01000000;
7291 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7292
7293 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7294 mutex_unlock(&dev_priv->sb_lock);
7295 }
7296
7297 static void chv_prepare_pll(struct intel_crtc *crtc,
7298 const struct intel_crtc_state *pipe_config)
7299 {
7300 struct drm_device *dev = crtc->base.dev;
7301 struct drm_i915_private *dev_priv = to_i915(dev);
7302 enum pipe pipe = crtc->pipe;
7303 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7304 u32 loopfilter, tribuf_calcntr;
7305 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7306 u32 dpio_val;
7307 int vco;
7308
7309 /* Enable Refclk and SSC */
7310 I915_WRITE(DPLL(pipe),
7311 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7312
7313 /* No need to actually set up the DPLL with DSI */
7314 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7315 return;
7316
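/*
 * On CHV, dpll.m2 is a fixed-point value: the integer part sits above
 * bit 22 and the low 22 bits hold the fraction; split accordingly for
 * the PLL_DW0/PLL_DW2 writes below.
 */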
7317 bestn = pipe_config->dpll.n;
7318 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7319 bestm1 = pipe_config->dpll.m1;
7320 bestm2 = pipe_config->dpll.m2 >> 22;
7321 bestp1 = pipe_config->dpll.p1;
7322 bestp2 = pipe_config->dpll.p2;
7323 vco = pipe_config->dpll.vco;
7324 dpio_val = 0;
7325 loopfilter = 0;
7326
7327 mutex_lock(&dev_priv->sb_lock);
7328
7329 /* p1 and p2 divider */
7330 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7331 5 << DPIO_CHV_S1_DIV_SHIFT |
7332 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7333 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7334 1 << DPIO_CHV_K_DIV_SHIFT);
7335
7336 /* Feedback post-divider - m2 */
7337 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7338
7339 /* Feedback refclk divider - n and m1 */
7340 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7341 DPIO_CHV_M1_DIV_BY_2 |
7342 1 << DPIO_CHV_N_DIV_SHIFT);
7343
7344 /* M2 fraction division */
7345 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7346
7347 /* M2 fraction division enable */
7348 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7349 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7350 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7351 if (bestm2_frac)
7352 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7353 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7354
7355 /* Program digital lock detect threshold */
7356 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7357 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7358 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7359 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7360 if (!bestm2_frac)
7361 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7362 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7363
7364 /* Loop filter */
7365 if (vco == 5400000) {
7366 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7367 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7368 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7369 tribuf_calcntr = 0x9;
7370 } else if (vco <= 6200000) {
7371 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7372 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7373 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7374 tribuf_calcntr = 0x9;
7375 } else if (vco <= 6480000) {
7376 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7377 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7378 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7379 tribuf_calcntr = 0x8;
7380 } else {
7381 /* Not supported. Apply the same limits as in the max case */
7382 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7383 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7384 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7385 tribuf_calcntr = 0;
7386 }
7387 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7388
7389 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7390 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7391 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7392 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7393
7394 /* AFC Recal */
7395 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7396 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7397 DPIO_AFC_RECAL);
7398
7399 mutex_unlock(&dev_priv->sb_lock);
7400 }
7401
7402 /**
7403 * vlv_force_pll_on - forcibly enable just the PLL
7404 * @dev_priv: i915 private structure
7405 * @pipe: pipe PLL to enable
7406 * @dpll: PLL configuration
7407 *
7408 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7409 * in cases where we need the PLL enabled even when @pipe is not going to
7410 * be enabled.
7411 */
7412 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7413 const struct dpll *dpll)
7414 {
7415 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7416 struct intel_crtc_state *pipe_config;
7417
7418 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7419 if (!pipe_config)
7420 return -ENOMEM;
7421
7422 pipe_config->base.crtc = &crtc->base;
7423 pipe_config->pixel_multiplier = 1;
7424 pipe_config->dpll = *dpll;
7425
7426 if (IS_CHERRYVIEW(dev_priv)) {
7427 chv_compute_dpll(crtc, pipe_config);
7428 chv_prepare_pll(crtc, pipe_config);
7429 chv_enable_pll(crtc, pipe_config);
7430 } else {
7431 vlv_compute_dpll(crtc, pipe_config);
7432 vlv_prepare_pll(crtc, pipe_config);
7433 vlv_enable_pll(crtc, pipe_config);
7434 }
7435
7436 kfree(pipe_config);
7437
7438 return 0;
7439 }
7440
7441 /**
7442 * vlv_force_pll_off - forcibly disable just the PLL
7443 * @dev_priv: i915 private structure
7444 * @pipe: pipe PLL to disable
7445 *
7446 * Disable the PLL for @pipe. To be used in cases where we need
7447 * the PLL enabled even when @pipe is not going to be enabled.
7448 */
7449 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7450 {
7451 if (IS_CHERRYVIEW(dev_priv))
7452 chv_disable_pll(dev_priv, pipe);
7453 else
7454 vlv_disable_pll(dev_priv, pipe);
7455 }
7456
7457 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7458 struct intel_crtc_state *crtc_state,
7459 struct dpll *reduced_clock)
7460 {
7461 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7462 u32 dpll;
7463 struct dpll *clock = &crtc_state->dpll;
7464
7465 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7466
7467 dpll = DPLL_VGA_MODE_DIS;
7468
7469 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7470 dpll |= DPLLB_MODE_LVDS;
7471 else
7472 dpll |= DPLLB_MODE_DAC_SERIAL;
7473
7474 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7475 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7476 dpll |= (crtc_state->pixel_multiplier - 1)
7477 << SDVO_MULTIPLIER_SHIFT_HIRES;
7478 }
7479
7480 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7481 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7482 dpll |= DPLL_SDVO_HIGH_SPEED;
7483
7484 if (intel_crtc_has_dp_encoder(crtc_state))
7485 dpll |= DPLL_SDVO_HIGH_SPEED;
7486
7487 /* compute bitmask from p1 value */
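/* e.g. p1 == 3 yields the single bit (1 << 2) in the P1 field */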
7488 if (IS_PINEVIEW(dev_priv))
7489 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7490 else {
7491 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7492 if (IS_G4X(dev_priv) && reduced_clock)
7493 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7494 }
7495 switch (clock->p2) {
7496 case 5:
7497 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7498 break;
7499 case 7:
7500 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7501 break;
7502 case 10:
7503 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7504 break;
7505 case 14:
7506 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7507 break;
7508 }
7509 if (INTEL_GEN(dev_priv) >= 4)
7510 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7511
7512 if (crtc_state->sdvo_tv_clock)
7513 dpll |= PLL_REF_INPUT_TVCLKINBC;
7514 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7515 intel_panel_use_ssc(dev_priv))
7516 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7517 else
7518 dpll |= PLL_REF_INPUT_DREFCLK;
7519
7520 dpll |= DPLL_VCO_ENABLE;
7521 crtc_state->dpll_hw_state.dpll = dpll;
7522
7523 if (INTEL_GEN(dev_priv) >= 4) {
7524 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7525 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7526 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7527 }
7528 }
7529
7530 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7531 struct intel_crtc_state *crtc_state,
7532 struct dpll *reduced_clock)
7533 {
7534 struct drm_device *dev = crtc->base.dev;
7535 struct drm_i915_private *dev_priv = to_i915(dev);
7536 u32 dpll;
7537 struct dpll *clock = &crtc_state->dpll;
7538
7539 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7540
7541 dpll = DPLL_VGA_MODE_DIS;
7542
7543 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7544 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7545 } else {
7546 if (clock->p1 == 2)
7547 dpll |= PLL_P1_DIVIDE_BY_TWO;
7548 else
7549 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7550 if (clock->p2 == 4)
7551 dpll |= PLL_P2_DIVIDE_BY_4;
7552 }
7553
7554 /*
7555 * Bspec:
7556 * "[Almador Errata]: For the correct operation of the muxed DVO pins
7557 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7558 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7559 * Enable) must be set to “1” in both the DPLL A Control Register
7560 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7561 *
7562 * For simplicity we keep both bits always enabled in
7563 * both DPLLs. The spec says we should disable the DVO 2X clock
7564 * when not needed, but this seems to work fine in practice.
7565 */
7566 if (IS_I830(dev_priv) ||
7567 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7568 dpll |= DPLL_DVO_2X_MODE;
7569
7570 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7571 intel_panel_use_ssc(dev_priv))
7572 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7573 else
7574 dpll |= PLL_REF_INPUT_DREFCLK;
7575
7576 dpll |= DPLL_VCO_ENABLE;
7577 crtc_state->dpll_hw_state.dpll = dpll;
7578 }
7579
7580 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7581 {
7582 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7583 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7584 enum pipe pipe = crtc->pipe;
7585 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7586 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7587 u32 crtc_vtotal, crtc_vblank_end;
7588 int vsyncshift = 0;
7589
7590 /* We need to be careful not to change the adjusted mode, as otherwise
7591 * the hw state checker will get angry at the mismatch. */
7592 crtc_vtotal = adjusted_mode->crtc_vtotal;
7593 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7594
7595 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7596 /* the chip adds 2 halflines (i.e. one full line) automatically, hence the -1 */
7597 crtc_vtotal -= 1;
7598 crtc_vblank_end -= 1;
7599
7600 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7601 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7602 else
7603 vsyncshift = adjusted_mode->crtc_hsync_start -
7604 adjusted_mode->crtc_htotal / 2;
7605 if (vsyncshift < 0)
7606 vsyncshift += adjusted_mode->crtc_htotal;
7607 }
7608
7609 if (INTEL_GEN(dev_priv) > 3)
7610 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7611
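/*
 * Each timing register packs (start/active - 1) in the low 16 bits and
 * (end/total - 1) in the high 16 bits. For example (illustrative mode):
 * hdisplay == 1920 with htotal == 2200 gives an HTOTAL value of 0x0897077f.
 */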
7612 I915_WRITE(HTOTAL(cpu_transcoder),
7613 (adjusted_mode->crtc_hdisplay - 1) |
7614 ((adjusted_mode->crtc_htotal - 1) << 16));
7615 I915_WRITE(HBLANK(cpu_transcoder),
7616 (adjusted_mode->crtc_hblank_start - 1) |
7617 ((adjusted_mode->crtc_hblank_end - 1) << 16));
7618 I915_WRITE(HSYNC(cpu_transcoder),
7619 (adjusted_mode->crtc_hsync_start - 1) |
7620 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7621
7622 I915_WRITE(VTOTAL(cpu_transcoder),
7623 (adjusted_mode->crtc_vdisplay - 1) |
7624 ((crtc_vtotal - 1) << 16));
7625 I915_WRITE(VBLANK(cpu_transcoder),
7626 (adjusted_mode->crtc_vblank_start - 1) |
7627 ((crtc_vblank_end - 1) << 16));
7628 I915_WRITE(VSYNC(cpu_transcoder),
7629 (adjusted_mode->crtc_vsync_start - 1) |
7630 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7631
7632 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7633 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7634 * documented in the DDI_FUNC_CTL register description, EDP Input Select
7635 * bits. */
7636 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
7637 (pipe == PIPE_B || pipe == PIPE_C))
7638 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7639
7640 }
7641
7642 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7643 {
7644 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7645 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7646 enum pipe pipe = crtc->pipe;
7647
7648 /* pipesrc controls the size that is scaled from, which should
7649 * always be the user's requested size.
7650 */
7651 I915_WRITE(PIPESRC(pipe),
7652 ((crtc_state->pipe_src_w - 1) << 16) |
7653 (crtc_state->pipe_src_h - 1));
7654 }
7655
7656 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7657 struct intel_crtc_state *pipe_config)
7658 {
7659 struct drm_device *dev = crtc->base.dev;
7660 struct drm_i915_private *dev_priv = to_i915(dev);
7661 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7662 u32 tmp;
7663
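/* Reverse the (value - 1) packing used by intel_set_pipe_timings() */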
7664 tmp = I915_READ(HTOTAL(cpu_transcoder));
7665 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7666 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7667 tmp = I915_READ(HBLANK(cpu_transcoder));
7668 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7669 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7670 tmp = I915_READ(HSYNC(cpu_transcoder));
7671 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7672 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7673
7674 tmp = I915_READ(VTOTAL(cpu_transcoder));
7675 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7676 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7677 tmp = I915_READ(VBLANK(cpu_transcoder));
7678 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7679 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7680 tmp = I915_READ(VSYNC(cpu_transcoder));
7681 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7682 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7683
7684 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7685 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7686 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7687 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7688 }
7689 }
7690
7691 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7692 struct intel_crtc_state *pipe_config)
7693 {
7694 struct drm_device *dev = crtc->base.dev;
7695 struct drm_i915_private *dev_priv = to_i915(dev);
7696 u32 tmp;
7697
7698 tmp = I915_READ(PIPESRC(crtc->pipe));
7699 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7700 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7701
7702 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7703 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7704 }
7705
7706 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7707 struct intel_crtc_state *pipe_config)
7708 {
7709 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7710 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7711 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7712 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7713
7714 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7715 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7716 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7717 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7718
7719 mode->flags = pipe_config->base.adjusted_mode.flags;
7720 mode->type = DRM_MODE_TYPE_DRIVER;
7721
7722 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7723
7724 mode->hsync = drm_mode_hsync(mode);
7725 mode->vrefresh = drm_mode_vrefresh(mode);
7726 drm_mode_set_name(mode);
7727 }
7728
7729 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
7730 {
7731 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7732 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7733 u32 pipeconf;
7734
7735 pipeconf = 0;
7736
7737 /* we keep both pipes enabled on 830 */
7738 if (IS_I830(dev_priv))
7739 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
7740
7741 if (crtc_state->double_wide)
7742 pipeconf |= PIPECONF_DOUBLE_WIDE;
7743
7744 /* only g4x and later have fancy bpc/dither controls */
7745 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7746 IS_CHERRYVIEW(dev_priv)) {
7747 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7748 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
7749 pipeconf |= PIPECONF_DITHER_EN |
7750 PIPECONF_DITHER_TYPE_SP;
7751
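/* pipe_bpp counts all three channels, so 18 bpp means 6 bits per channel */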
7752 switch (crtc_state->pipe_bpp) {
7753 case 18:
7754 pipeconf |= PIPECONF_6BPC;
7755 break;
7756 case 24:
7757 pipeconf |= PIPECONF_8BPC;
7758 break;
7759 case 30:
7760 pipeconf |= PIPECONF_10BPC;
7761 break;
7762 default:
7763 /* Case prevented by intel_choose_pipe_bpp_dither. */
7764 BUG();
7765 }
7766 }
7767
7768 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7769 if (INTEL_GEN(dev_priv) < 4 ||
7770 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7771 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7772 else
7773 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7774 } else {
7775 pipeconf |= PIPECONF_PROGRESSIVE;
7776 }
7777
7778 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7779 crtc_state->limited_color_range)
7780 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7781
7782 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
7783
7784 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7785 POSTING_READ(PIPECONF(crtc->pipe));
7786 }
7787
7788 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7789 struct intel_crtc_state *crtc_state)
7790 {
7791 struct drm_device *dev = crtc->base.dev;
7792 struct drm_i915_private *dev_priv = to_i915(dev);
7793 const struct intel_limit *limit;
7794 int refclk = 48000;
7795
7796 memset(&crtc_state->dpll_hw_state, 0,
7797 sizeof(crtc_state->dpll_hw_state));
7798
7799 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7800 if (intel_panel_use_ssc(dev_priv)) {
7801 refclk = dev_priv->vbt.lvds_ssc_freq;
7802 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7803 }
7804
7805 limit = &intel_limits_i8xx_lvds;
7806 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7807 limit = &intel_limits_i8xx_dvo;
7808 } else {
7809 limit = &intel_limits_i8xx_dac;
7810 }
7811
7812 if (!crtc_state->clock_set &&
7813 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7814 refclk, NULL, &crtc_state->dpll)) {
7815 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7816 return -EINVAL;
7817 }
7818
7819 i8xx_compute_dpll(crtc, crtc_state, NULL);
7820
7821 return 0;
7822 }
7823
7824 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7825 struct intel_crtc_state *crtc_state)
7826 {
7827 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7828 const struct intel_limit *limit;
7829 int refclk = 96000;
7830
7831 memset(&crtc_state->dpll_hw_state, 0,
7832 sizeof(crtc_state->dpll_hw_state));
7833
7834 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7835 if (intel_panel_use_ssc(dev_priv)) {
7836 refclk = dev_priv->vbt.lvds_ssc_freq;
7837 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7838 }
7839
7840 if (intel_is_dual_link_lvds(dev_priv))
7841 limit = &intel_limits_g4x_dual_channel_lvds;
7842 else
7843 limit = &intel_limits_g4x_single_channel_lvds;
7844 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7845 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7846 limit = &intel_limits_g4x_hdmi;
7847 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7848 limit = &intel_limits_g4x_sdvo;
7849 } else {
7850 /* Fallback limits for all other output types */
7851 limit = &intel_limits_i9xx_sdvo;
7852 }
7853
7854 if (!crtc_state->clock_set &&
7855 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7856 refclk, NULL, &crtc_state->dpll)) {
7857 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7858 return -EINVAL;
7859 }
7860
7861 i9xx_compute_dpll(crtc, crtc_state, NULL);
7862
7863 return 0;
7864 }
7865
7866 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7867 struct intel_crtc_state *crtc_state)
7868 {
7869 struct drm_device *dev = crtc->base.dev;
7870 struct drm_i915_private *dev_priv = to_i915(dev);
7871 const struct intel_limit *limit;
7872 int refclk = 96000;
7873
7874 memset(&crtc_state->dpll_hw_state, 0,
7875 sizeof(crtc_state->dpll_hw_state));
7876
7877 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7878 if (intel_panel_use_ssc(dev_priv)) {
7879 refclk = dev_priv->vbt.lvds_ssc_freq;
7880 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7881 }
7882
7883 limit = &intel_limits_pineview_lvds;
7884 } else {
7885 limit = &intel_limits_pineview_sdvo;
7886 }
7887
7888 if (!crtc_state->clock_set &&
7889 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7890 refclk, NULL, &crtc_state->dpll)) {
7891 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7892 return -EINVAL;
7893 }
7894
7895 i9xx_compute_dpll(crtc, crtc_state, NULL);
7896
7897 return 0;
7898 }
7899
7900 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7901 struct intel_crtc_state *crtc_state)
7902 {
7903 struct drm_device *dev = crtc->base.dev;
7904 struct drm_i915_private *dev_priv = to_i915(dev);
7905 const struct intel_limit *limit;
7906 int refclk = 96000;
7907
7908 memset(&crtc_state->dpll_hw_state, 0,
7909 sizeof(crtc_state->dpll_hw_state));
7910
7911 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7912 if (intel_panel_use_ssc(dev_priv)) {
7913 refclk = dev_priv->vbt.lvds_ssc_freq;
7914 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7915 }
7916
7917 limit = &intel_limits_i9xx_lvds;
7918 } else {
7919 limit = &intel_limits_i9xx_sdvo;
7920 }
7921
7922 if (!crtc_state->clock_set &&
7923 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7924 refclk, NULL, &crtc_state->dpll)) {
7925 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7926 return -EINVAL;
7927 }
7928
7929 i9xx_compute_dpll(crtc, crtc_state, NULL);
7930
7931 return 0;
7932 }
7933
7934 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7935 struct intel_crtc_state *crtc_state)
7936 {
7937 int refclk = 100000;
7938 const struct intel_limit *limit = &intel_limits_chv;
7939
7940 memset(&crtc_state->dpll_hw_state, 0,
7941 sizeof(crtc_state->dpll_hw_state));
7942
7943 if (!crtc_state->clock_set &&
7944 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7945 refclk, NULL, &crtc_state->dpll)) {
7946 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7947 return -EINVAL;
7948 }
7949
7950 chv_compute_dpll(crtc, crtc_state);
7951
7952 return 0;
7953 }
7954
7955 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7956 struct intel_crtc_state *crtc_state)
7957 {
7958 int refclk = 100000;
7959 const struct intel_limit *limit = &intel_limits_vlv;
7960
7961 memset(&crtc_state->dpll_hw_state, 0,
7962 sizeof(crtc_state->dpll_hw_state));
7963
7964 if (!crtc_state->clock_set &&
7965 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7966 refclk, NULL, &crtc_state->dpll)) {
7967 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7968 return -EINVAL;
7969 }
7970
7971 vlv_compute_dpll(crtc, crtc_state);
7972
7973 return 0;
7974 }
7975
7976 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
7977 {
7978 if (IS_I830(dev_priv))
7979 return false;
7980
7981 return INTEL_GEN(dev_priv) >= 4 ||
7982 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
7983 }
7984
7985 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7986 struct intel_crtc_state *pipe_config)
7987 {
7988 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7989 u32 tmp;
7990
7991 if (!i9xx_has_pfit(dev_priv))
7992 return;
7993
7994 tmp = I915_READ(PFIT_CONTROL);
7995 if (!(tmp & PFIT_ENABLE))
7996 return;
7997
7998 /* Check whether the pfit is attached to our pipe. */
7999 if (INTEL_GEN(dev_priv) < 4) {
8000 if (crtc->pipe != PIPE_B)
8001 return;
8002 } else {
8003 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8004 return;
8005 }
8006
8007 pipe_config->gmch_pfit.control = tmp;
8008 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8009 }
8010
8011 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8012 struct intel_crtc_state *pipe_config)
8013 {
8014 struct drm_device *dev = crtc->base.dev;
8015 struct drm_i915_private *dev_priv = to_i915(dev);
8016 int pipe = pipe_config->cpu_transcoder;
8017 struct dpll clock;
8018 u32 mdiv;
8019 int refclk = 100000;
8020
8021 /* In case of DSI, DPLL will not be used */
8022 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8023 return;
8024
8025 mutex_lock(&dev_priv->sb_lock);
8026 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8027 mutex_unlock(&dev_priv->sb_lock);
8028
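/* Undo the divider packing done by vlv_prepare_pll() for PLL_DW3 */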
8029 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8030 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8031 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8032 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8033 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8034
8035 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8036 }
8037
8038 static void
8039 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8040 struct intel_initial_plane_config *plane_config)
8041 {
8042 struct drm_device *dev = crtc->base.dev;
8043 struct drm_i915_private *dev_priv = to_i915(dev);
8044 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8045 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8046 enum pipe pipe;
8047 u32 val, base, offset;
8048 int fourcc, pixel_format;
8049 unsigned int aligned_height;
8050 struct drm_framebuffer *fb;
8051 struct intel_framebuffer *intel_fb;
8052
8053 if (!plane->get_hw_state(plane, &pipe))
8054 return;
8055
8056 WARN_ON(pipe != crtc->pipe);
8057
8058 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8059 if (!intel_fb) {
8060 DRM_DEBUG_KMS("failed to alloc fb\n");
8061 return;
8062 }
8063
8064 fb = &intel_fb->base;
8065
8066 fb->dev = dev;
8067
8068 val = I915_READ(DSPCNTR(i9xx_plane));
8069
8070 if (INTEL_GEN(dev_priv) >= 4) {
8071 if (val & DISPPLANE_TILED) {
8072 plane_config->tiling = I915_TILING_X;
8073 fb->modifier = I915_FORMAT_MOD_X_TILED;
8074 }
8075
8076 if (val & DISPPLANE_ROTATE_180)
8077 plane_config->rotation = DRM_MODE_ROTATE_180;
8078 }
8079
8080 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8081 val & DISPPLANE_MIRROR)
8082 plane_config->rotation |= DRM_MODE_REFLECT_X;
8083
8084 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8085 fourcc = i9xx_format_to_fourcc(pixel_format);
8086 fb->format = drm_format_info(fourcc);
8087
8088 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8089 offset = I915_READ(DSPOFFSET(i9xx_plane));
8090 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8091 } else if (INTEL_GEN(dev_priv) >= 4) {
8092 if (plane_config->tiling)
8093 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8094 else
8095 offset = I915_READ(DSPLINOFF(i9xx_plane));
8096 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8097 } else {
8098 base = I915_READ(DSPADDR(i9xx_plane));
8099 }
8100 plane_config->base = base;
8101
8102 val = I915_READ(PIPESRC(pipe));
8103 fb->width = ((val >> 16) & 0xfff) + 1;
8104 fb->height = ((val >> 0) & 0xfff) + 1;
8105
8106 val = I915_READ(DSPSTRIDE(i9xx_plane));
8107 fb->pitches[0] = val & 0xffffffc0;
8108
8109 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8110
8111 plane_config->size = fb->pitches[0] * aligned_height;
8112
8113 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8114 crtc->base.name, plane->base.name, fb->width, fb->height,
8115 fb->format->cpp[0] * 8, base, fb->pitches[0],
8116 plane_config->size);
8117
8118 plane_config->fb = intel_fb;
8119 }
8120
8121 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8122 struct intel_crtc_state *pipe_config)
8123 {
8124 struct drm_device *dev = crtc->base.dev;
8125 struct drm_i915_private *dev_priv = to_i915(dev);
8126 int pipe = pipe_config->cpu_transcoder;
8127 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8128 struct dpll clock;
8129 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8130 int refclk = 100000;
8131
8132 /* In case of DSI, DPLL will not be used */
8133 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8134 return;
8135
8136 mutex_lock(&dev_priv->sb_lock);
8137 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8138 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8139 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8140 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8141 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8142 mutex_unlock(&dev_priv->sb_lock);
8143
8144 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8145 clock.m2 = (pll_dw0 & 0xff) << 22;
8146 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8147 clock.m2 |= pll_dw2 & 0x3fffff;
8148 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8149 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8150 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8151
8152 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8153 }
8154
8155 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8156 struct intel_crtc_state *pipe_config)
8157 {
8158 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8159 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8160
8161 pipe_config->lspcon_downsampling = false;
8162
8163 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8164 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8165
8166 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8167 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8168 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8169
8170 if (ycbcr420_enabled) {
8171 /* We support 4:2:0 in full blend mode only */
8172 if (!blend)
8173 output = INTEL_OUTPUT_FORMAT_INVALID;
8174 else if (!(IS_GEMINILAKE(dev_priv) ||
8175 INTEL_GEN(dev_priv) >= 10))
8176 output = INTEL_OUTPUT_FORMAT_INVALID;
8177 else
8178 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8179 } else {
8180 /*
8181 * Currently there is no interface defined to
8182 * check user preference between RGB/YCBCR444
8183 * or YCBCR420. So the only possible case for
8184 * YCBCR444 usage is driving YCBCR420 output
8185 * with LSPCON, when pipe is configured for
8186 * YCBCR444 output and LSPCON takes care of
8187 * downsampling it.
8188 */
8189 pipe_config->lspcon_downsampling = true;
8190 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8191 }
8192 }
8193 }
8194
8195 pipe_config->output_format = output;
8196 }
8197
8198 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8199 {
8200 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8201 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8202 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8203 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8204 u32 tmp;
8205
8206 tmp = I915_READ(DSPCNTR(i9xx_plane));
8207
8208 if (tmp & DISPPLANE_GAMMA_ENABLE)
8209 crtc_state->gamma_enable = true;
8210
8211 if (!HAS_GMCH(dev_priv) &&
8212 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8213 crtc_state->csc_enable = true;
8214 }
8215
8216 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8217 struct intel_crtc_state *pipe_config)
8218 {
8219 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8220 enum intel_display_power_domain power_domain;
8221 intel_wakeref_t wakeref;
8222 u32 tmp;
8223 bool ret;
8224
8225 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8226 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8227 if (!wakeref)
8228 return false;
8229
8230 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8231 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8232 pipe_config->shared_dpll = NULL;
8233
8234 ret = false;
8235
8236 tmp = I915_READ(PIPECONF(crtc->pipe));
8237 if (!(tmp & PIPECONF_ENABLE))
8238 goto out;
8239
8240 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8241 IS_CHERRYVIEW(dev_priv)) {
8242 switch (tmp & PIPECONF_BPC_MASK) {
8243 case PIPECONF_6BPC:
8244 pipe_config->pipe_bpp = 18;
8245 break;
8246 case PIPECONF_8BPC:
8247 pipe_config->pipe_bpp = 24;
8248 break;
8249 case PIPECONF_10BPC:
8250 pipe_config->pipe_bpp = 30;
8251 break;
8252 default:
8253 break;
8254 }
8255 }
8256
8257 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8258 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8259 pipe_config->limited_color_range = true;
8260
8261 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8262 PIPECONF_GAMMA_MODE_SHIFT;
8263
8264 if (IS_CHERRYVIEW(dev_priv))
8265 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8266
8267 i9xx_get_pipe_color_config(pipe_config);
8268
8269 if (INTEL_GEN(dev_priv) < 4)
8270 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8271
8272 intel_get_pipe_timings(crtc, pipe_config);
8273 intel_get_pipe_src_size(crtc, pipe_config);
8274
8275 i9xx_get_pfit_config(crtc, pipe_config);
8276
8277 if (INTEL_GEN(dev_priv) >= 4) {
8278 /* No way to read it out on pipes B and C */
8279 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8280 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8281 else
8282 tmp = I915_READ(DPLL_MD(crtc->pipe));
8283 pipe_config->pixel_multiplier =
8284 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8285 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8286 pipe_config->dpll_hw_state.dpll_md = tmp;
8287 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8288 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8289 tmp = I915_READ(DPLL(crtc->pipe));
8290 pipe_config->pixel_multiplier =
8291 ((tmp & SDVO_MULTIPLIER_MASK)
8292 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8293 } else {
8294 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8295 * port and will be fixed up in the encoder->get_config
8296 * function. */
8297 pipe_config->pixel_multiplier = 1;
8298 }
8299 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8300 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8301 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8302 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8303 } else {
8304 /* Mask out read-only status bits. */
8305 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8306 DPLL_PORTC_READY_MASK |
8307 DPLL_PORTB_READY_MASK);
8308 }
8309
8310 if (IS_CHERRYVIEW(dev_priv))
8311 chv_crtc_clock_get(crtc, pipe_config);
8312 else if (IS_VALLEYVIEW(dev_priv))
8313 vlv_crtc_clock_get(crtc, pipe_config);
8314 else
8315 i9xx_crtc_clock_get(crtc, pipe_config);
8316
8317 /*
8318 * Normally the dotclock is filled in by the encoder .get_config()
8319 * but in case the pipe is enabled w/o any ports we need a sane
8320 * default.
8321 */
8322 pipe_config->base.adjusted_mode.crtc_clock =
8323 pipe_config->port_clock / pipe_config->pixel_multiplier;
8324
8325 ret = true;
8326
8327 out:
8328 intel_display_power_put(dev_priv, power_domain, wakeref);
8329
8330 return ret;
8331 }
8332
8333 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8334 {
8335 struct intel_encoder *encoder;
8336 int i;
8337 u32 val, final;
8338 bool has_lvds = false;
8339 bool has_cpu_edp = false;
8340 bool has_panel = false;
8341 bool has_ck505 = false;
8342 bool can_ssc = false;
8343 bool using_ssc_source = false;
8344
8345 /* We need to take the global config into account */
8346 for_each_intel_encoder(&dev_priv->drm, encoder) {
8347 switch (encoder->type) {
8348 case INTEL_OUTPUT_LVDS:
8349 has_panel = true;
8350 has_lvds = true;
8351 break;
8352 case INTEL_OUTPUT_EDP:
8353 has_panel = true;
8354 if (encoder->port == PORT_A)
8355 has_cpu_edp = true;
8356 break;
8357 default:
8358 break;
8359 }
8360 }
8361
8362 if (HAS_PCH_IBX(dev_priv)) {
8363 has_ck505 = dev_priv->vbt.display_clock_mode;
8364 can_ssc = has_ck505;
8365 } else {
8366 has_ck505 = false;
8367 can_ssc = true;
8368 }
8369
8370 /* Check if any DPLLs are using the SSC source */
8371 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8372 u32 temp = I915_READ(PCH_DPLL(i));
8373
8374 if (!(temp & DPLL_VCO_ENABLE))
8375 continue;
8376
8377 if ((temp & PLL_REF_INPUT_MASK) ==
8378 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8379 using_ssc_source = true;
8380 break;
8381 }
8382 }
8383
8384 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8385 has_panel, has_lvds, has_ck505, using_ssc_source);
8386
8387 /* Ironlake: try to set up the display reference clock before
8388 * enabling the DPLLs. This is only under the driver's control
8389 * after the PCH B stepping; earlier steppings ignore this
8390 * setting.
8391 */
8392 val = I915_READ(PCH_DREF_CONTROL);
8393
8394 /* As we must carefully and slowly disable/enable each source in turn,
8395 * compute the final state we want first and check if we need to
8396 * make any changes at all.
8397 */
8398 final = val;
8399 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8400 if (has_ck505)
8401 final |= DREF_NONSPREAD_CK505_ENABLE;
8402 else
8403 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8404
8405 final &= ~DREF_SSC_SOURCE_MASK;
8406 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8407 final &= ~DREF_SSC1_ENABLE;
8408
8409 if (has_panel) {
8410 final |= DREF_SSC_SOURCE_ENABLE;
8411
8412 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8413 final |= DREF_SSC1_ENABLE;
8414
8415 if (has_cpu_edp) {
8416 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8417 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8418 else
8419 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8420 } else
8421 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8422 } else if (using_ssc_source) {
8423 final |= DREF_SSC_SOURCE_ENABLE;
8424 final |= DREF_SSC1_ENABLE;
8425 }
8426
8427 if (final == val)
8428 return;
8429
8430 /* Always enable nonspread source */
8431 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8432
8433 if (has_ck505)
8434 val |= DREF_NONSPREAD_CK505_ENABLE;
8435 else
8436 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8437
8438 if (has_panel) {
8439 val &= ~DREF_SSC_SOURCE_MASK;
8440 val |= DREF_SSC_SOURCE_ENABLE;
8441
8442 /* SSC must be turned on before enabling the CPU output */
8443 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8444 DRM_DEBUG_KMS("Using SSC on panel\n");
8445 val |= DREF_SSC1_ENABLE;
8446 } else
8447 val &= ~DREF_SSC1_ENABLE;
8448
8449 /* Get SSC going before enabling the outputs */
8450 I915_WRITE(PCH_DREF_CONTROL, val);
8451 POSTING_READ(PCH_DREF_CONTROL);
8452 udelay(200);
8453
8454 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8455
8456 /* Enable CPU source on CPU attached eDP */
8457 if (has_cpu_edp) {
8458 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8459 DRM_DEBUG_KMS("Using SSC on eDP\n");
8460 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8461 } else
8462 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8463 } else
8464 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8465
8466 I915_WRITE(PCH_DREF_CONTROL, val);
8467 POSTING_READ(PCH_DREF_CONTROL);
8468 udelay(200);
8469 } else {
8470 DRM_DEBUG_KMS("Disabling CPU source output\n");
8471
8472 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8473
8474 /* Turn off CPU output */
8475 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8476
8477 I915_WRITE(PCH_DREF_CONTROL, val);
8478 POSTING_READ(PCH_DREF_CONTROL);
8479 udelay(200);
8480
8481 if (!using_ssc_source) {
8482 DRM_DEBUG_KMS("Disabling SSC source\n");
8483
8484 /* Turn off the SSC source */
8485 val &= ~DREF_SSC_SOURCE_MASK;
8486 val |= DREF_SSC_SOURCE_DISABLE;
8487
8488 /* Turn off SSC1 */
8489 val &= ~DREF_SSC1_ENABLE;
8490
8491 I915_WRITE(PCH_DREF_CONTROL, val);
8492 POSTING_READ(PCH_DREF_CONTROL);
8493 udelay(200);
8494 }
8495 }
8496
8497 BUG_ON(val != final);
8498 }
8499
8500 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8501 {
8502 u32 tmp;
8503
8504 tmp = I915_READ(SOUTH_CHICKEN2);
8505 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8506 I915_WRITE(SOUTH_CHICKEN2, tmp);
8507
8508 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8509 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8510 DRM_ERROR("FDI mPHY reset assert timeout\n");
8511
8512 tmp = I915_READ(SOUTH_CHICKEN2);
8513 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8514 I915_WRITE(SOUTH_CHICKEN2, tmp);
8515
8516 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8517 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8518 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8519 }
8520
8521 /* WaMPhyProgramming:hsw */
8522 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8523 {
8524 u32 tmp;
8525
8526 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8527 tmp &= ~(0xFF << 24);
8528 tmp |= (0x12 << 24);
8529 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8530
8531 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8532 tmp |= (1 << 11);
8533 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8534
8535 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8536 tmp |= (1 << 11);
8537 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8538
8539 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8540 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8541 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8542
8543 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8544 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8545 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8546
8547 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8548 tmp &= ~(7 << 13);
8549 tmp |= (5 << 13);
8550 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8551
8552 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8553 tmp &= ~(7 << 13);
8554 tmp |= (5 << 13);
8555 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8556
8557 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8558 tmp &= ~0xFF;
8559 tmp |= 0x1C;
8560 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8561
8562 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8563 tmp &= ~0xFF;
8564 tmp |= 0x1C;
8565 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8566
8567 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8568 tmp &= ~(0xFF << 16);
8569 tmp |= (0x1C << 16);
8570 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8571
8572 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8573 tmp &= ~(0xFF << 16);
8574 tmp |= (0x1C << 16);
8575 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8576
8577 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8578 tmp |= (1 << 27);
8579 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8580
8581 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8582 tmp |= (1 << 27);
8583 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8584
8585 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8586 tmp &= ~(0xF << 28);
8587 tmp |= (4 << 28);
8588 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8589
8590 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8591 tmp &= ~(0xF << 28);
8592 tmp |= (4 << 28);
8593 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8594 }
8595
8596 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8597 * Programming" based on the parameters passed:
8598 * - Sequence to enable CLKOUT_DP
8599 * - Sequence to enable CLKOUT_DP without spread
8600 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8601 */
8602 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
8603 bool with_spread, bool with_fdi)
8604 {
8605 u32 reg, tmp;
8606
8607 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8608 with_spread = true;
8609 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
8610 with_fdi, "LP PCH doesn't have FDI\n"))
8611 with_fdi = false;
8612
8613 mutex_lock(&dev_priv->sb_lock);
8614
8615 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8616 tmp &= ~SBI_SSCCTL_DISABLE;
8617 tmp |= SBI_SSCCTL_PATHALT;
8618 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8619
8620 udelay(24);
8621
8622 if (with_spread) {
8623 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8624 tmp &= ~SBI_SSCCTL_PATHALT;
8625 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8626
8627 if (with_fdi) {
8628 lpt_reset_fdi_mphy(dev_priv);
8629 lpt_program_fdi_mphy(dev_priv);
8630 }
8631 }
8632
8633 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
8634 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8635 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8636 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8637
8638 mutex_unlock(&dev_priv->sb_lock);
8639 }
8640
8641 /* Sequence to disable CLKOUT_DP */
8642 static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
8643 {
8644 u32 reg, tmp;
8645
8646 mutex_lock(&dev_priv->sb_lock);
8647
8648 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
8649 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8650 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8651 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8652
8653 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8654 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8655 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8656 tmp |= SBI_SSCCTL_PATHALT;
8657 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8658 udelay(32);
8659 }
8660 tmp |= SBI_SSCCTL_DISABLE;
8661 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8662 }
8663
8664 mutex_unlock(&dev_priv->sb_lock);
8665 }
8666
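/* Map a bend of -50..+50 (in steps of 5) onto a 0..20 table index */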
8667 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8668
8669 static const u16 sscdivintphase[] = {
8670 [BEND_IDX( 50)] = 0x3B23,
8671 [BEND_IDX( 45)] = 0x3B23,
8672 [BEND_IDX( 40)] = 0x3C23,
8673 [BEND_IDX( 35)] = 0x3C23,
8674 [BEND_IDX( 30)] = 0x3D23,
8675 [BEND_IDX( 25)] = 0x3D23,
8676 [BEND_IDX( 20)] = 0x3E23,
8677 [BEND_IDX( 15)] = 0x3E23,
8678 [BEND_IDX( 10)] = 0x3F23,
8679 [BEND_IDX( 5)] = 0x3F23,
8680 [BEND_IDX( 0)] = 0x0025,
8681 [BEND_IDX( -5)] = 0x0025,
8682 [BEND_IDX(-10)] = 0x0125,
8683 [BEND_IDX(-15)] = 0x0125,
8684 [BEND_IDX(-20)] = 0x0225,
8685 [BEND_IDX(-25)] = 0x0225,
8686 [BEND_IDX(-30)] = 0x0325,
8687 [BEND_IDX(-35)] = 0x0325,
8688 [BEND_IDX(-40)] = 0x0425,
8689 [BEND_IDX(-45)] = 0x0425,
8690 [BEND_IDX(-50)] = 0x0525,
8691 };
8692
8693 /*
8694 * Bend CLKOUT_DP
8695 * steps -50 to 50 inclusive, in steps of 5
8696 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8697 * change in clock period = -(steps / 10) * 5.787 ps
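 * For example: steps == -20 slows the clock, lengthening the period by
 * -(-20 / 10) * 5.787 = +11.574 ps, and BEND_IDX(-20) == 6 selects the
 * 0x0225 entry above.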
8698 */
8699 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8700 {
8701 u32 tmp;
8702 int idx = BEND_IDX(steps);
8703
8704 if (WARN_ON(steps % 5 != 0))
8705 return;
8706
8707 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8708 return;
8709
8710 mutex_lock(&dev_priv->sb_lock);
8711
8712 if (steps % 10 != 0)
8713 tmp = 0xAAAAAAAB;
8714 else
8715 tmp = 0x00000000;
8716 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8717
8718 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8719 tmp &= 0xffff0000;
8720 tmp |= sscdivintphase[idx];
8721 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8722
8723 mutex_unlock(&dev_priv->sb_lock);
8724 }
8725
8726 #undef BEND_IDX
8727
8728 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8729 {
8730 struct intel_encoder *encoder;
8731 bool has_vga = false;
8732
8733 for_each_intel_encoder(&dev_priv->drm, encoder) {
8734 switch (encoder->type) {
8735 case INTEL_OUTPUT_ANALOG:
8736 has_vga = true;
8737 break;
8738 default:
8739 break;
8740 }
8741 }
8742
8743 if (has_vga) {
8744 lpt_bend_clkout_dp(dev_priv, 0);
8745 lpt_enable_clkout_dp(dev_priv, true, true);
8746 } else {
8747 lpt_disable_clkout_dp(dev_priv);
8748 }
8749 }
8750
8751 /*
8752 * Initialize reference clocks when the driver loads
8753 */
8754 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8755 {
8756 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8757 ironlake_init_pch_refclk(dev_priv);
8758 else if (HAS_PCH_LPT(dev_priv))
8759 lpt_init_pch_refclk(dev_priv);
8760 }
8761
8762 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8763 {
8764 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8765 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8766 enum pipe pipe = crtc->pipe;
8767 u32 val;
8768
8769 val = 0;
8770
8771 switch (crtc_state->pipe_bpp) {
8772 case 18:
8773 val |= PIPECONF_6BPC;
8774 break;
8775 case 24:
8776 val |= PIPECONF_8BPC;
8777 break;
8778 case 30:
8779 val |= PIPECONF_10BPC;
8780 break;
8781 case 36:
8782 val |= PIPECONF_12BPC;
8783 break;
8784 default:
8785 /* Case prevented by intel_choose_pipe_bpp_dither. */
8786 BUG();
8787 }
8788
8789 if (crtc_state->dither)
8790 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8791
8792 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8793 val |= PIPECONF_INTERLACED_ILK;
8794 else
8795 val |= PIPECONF_PROGRESSIVE;
8796
8797 if (crtc_state->limited_color_range)
8798 val |= PIPECONF_COLOR_RANGE_SELECT;
8799
8800 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8801
8802 I915_WRITE(PIPECONF(pipe), val);
8803 POSTING_READ(PIPECONF(pipe));
8804 }
8805
8806 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8807 {
8808 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8809 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8810 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8811 u32 val = 0;
8812
8813 if (IS_HASWELL(dev_priv) && crtc_state->dither)
8814 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8815
8816 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8817 val |= PIPECONF_INTERLACED_ILK;
8818 else
8819 val |= PIPECONF_PROGRESSIVE;
8820
8821 I915_WRITE(PIPECONF(cpu_transcoder), val);
8822 POSTING_READ(PIPECONF(cpu_transcoder));
8823 }
8824
8825 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8826 {
8827 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8828 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8829
8830 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8831 u32 val = 0;
8832
8833 switch (crtc_state->pipe_bpp) {
8834 case 18:
8835 val |= PIPEMISC_DITHER_6_BPC;
8836 break;
8837 case 24:
8838 val |= PIPEMISC_DITHER_8_BPC;
8839 break;
8840 case 30:
8841 val |= PIPEMISC_DITHER_10_BPC;
8842 break;
8843 case 36:
8844 val |= PIPEMISC_DITHER_12_BPC;
8845 break;
8846 default:
8847 /* Case prevented by pipe_config_set_bpp. */
8848 BUG();
8849 }
8850
8851 if (crtc_state->dither)
8852 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8853
8854 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8855 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
8856 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8857
8858 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
8859 val |= PIPEMISC_YUV420_ENABLE |
8860 PIPEMISC_YUV420_MODE_FULL_BLEND;
8861
8862 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8863 }
8864 }
8865
8866 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8867 {
8868 /*
8869 * Account for spread spectrum to avoid
8870 * oversubscribing the link. Max center spread
8871 * is 2.5%; use 5% for safety's sake.
8872 */
8873 u32 bps = target_clock * bpp * 21 / 20;
8874 return DIV_ROUND_UP(bps, link_bw * 8);
8875 }
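
/*
 * Illustrative arithmetic for the helper above (the input values are an
 * assumption, not taken from the driver): a 148500 kHz pixel clock at
 * 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200, so a 270000 kHz
 * (2.7 GHz) link needs DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */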
8876
8877 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8878 {
8879 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8880 }
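
/*
 * FP_CB_TUNE is requested whenever the effective divider M drops below
 * factor * N; the factor itself (21 by default, 25 for 100 MHz SSC or
 * IBX dual-link LVDS, 20 for SDVO TV) is chosen in
 * ironlake_compute_dpll() below.
 */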
8881
8882 static void ironlake_compute_dpll(struct intel_crtc *crtc,
8883 struct intel_crtc_state *crtc_state,
8884 struct dpll *reduced_clock)
8885 {
8886 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8887 u32 dpll, fp, fp2;
8888 int factor;
8889
8890 /* Enable autotuning of the PLL clock (if permissible) */
8891 factor = 21;
8892 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8893 if ((intel_panel_use_ssc(dev_priv) &&
8894 dev_priv->vbt.lvds_ssc_freq == 100000) ||
8895 (HAS_PCH_IBX(dev_priv) &&
8896 intel_is_dual_link_lvds(dev_priv)))
8897 factor = 25;
8898 } else if (crtc_state->sdvo_tv_clock) {
8899 factor = 20;
8900 }
8901
8902 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8903
8904 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8905 fp |= FP_CB_TUNE;
8906
8907 if (reduced_clock) {
8908 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8909
8910 if (reduced_clock->m < factor * reduced_clock->n)
8911 fp2 |= FP_CB_TUNE;
8912 } else {
8913 fp2 = fp;
8914 }
8915
8916 dpll = 0;
8917
8918 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8919 dpll |= DPLLB_MODE_LVDS;
8920 else
8921 dpll |= DPLLB_MODE_DAC_SERIAL;
8922
8923 dpll |= (crtc_state->pixel_multiplier - 1)
8924 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8925
8926 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8927 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8928 dpll |= DPLL_SDVO_HIGH_SPEED;
8929
8930 if (intel_crtc_has_dp_encoder(crtc_state))
8931 dpll |= DPLL_SDVO_HIGH_SPEED;
8932
8933 /*
8934 * The high speed IO clock is only really required for
8935 * SDVO/HDMI/DP, but we also enable it for CRT to make it
8936 * possible to share the DPLL between CRT and HDMI. Enabling
8937 * the clock needlessly does no real harm, except potentially
8938 * using up a bit of power.
8939 *
8940 * We'll limit this to IVB with 3 pipes, since it has only two
8941 * DPLLs and so DPLL sharing is the only way to get three pipes
8942 * driving PCH ports at the same time. On SNB we could do this,
8943 * and potentially avoid enabling the second DPLL, but it's not
8944 * clear if it's a win or a loss power-wise. No point in doing
8945 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8946 */
8947 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
8948 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
8949 dpll |= DPLL_SDVO_HIGH_SPEED;
8950
8951 /* compute bitmask from p1 value */
8952 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8953 /* also FPA1 */
8954 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8955
8956 switch (crtc_state->dpll.p2) {
8957 case 5:
8958 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8959 break;
8960 case 7:
8961 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8962 break;
8963 case 10:
8964 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8965 break;
8966 case 14:
8967 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8968 break;
8969 }
8970
8971 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8972 intel_panel_use_ssc(dev_priv))
8973 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8974 else
8975 dpll |= PLL_REF_INPUT_DREFCLK;
8976
8977 dpll |= DPLL_VCO_ENABLE;
8978
8979 crtc_state->dpll_hw_state.dpll = dpll;
8980 crtc_state->dpll_hw_state.fp0 = fp;
8981 crtc_state->dpll_hw_state.fp1 = fp2;
8982 }
8983
8984 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8985 struct intel_crtc_state *crtc_state)
8986 {
8987 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8988 const struct intel_limit *limit;
8989 int refclk = 120000;
8990
8991 memset(&crtc_state->dpll_hw_state, 0,
8992 sizeof(crtc_state->dpll_hw_state));
8993
8994 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8995 if (!crtc_state->has_pch_encoder)
8996 return 0;
8997
8998 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8999 if (intel_panel_use_ssc(dev_priv)) {
9000 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9001 dev_priv->vbt.lvds_ssc_freq);
9002 refclk = dev_priv->vbt.lvds_ssc_freq;
9003 }
9004
9005 if (intel_is_dual_link_lvds(dev_priv)) {
9006 if (refclk == 100000)
9007 limit = &intel_limits_ironlake_dual_lvds_100m;
9008 else
9009 limit = &intel_limits_ironlake_dual_lvds;
9010 } else {
9011 if (refclk == 100000)
9012 limit = &intel_limits_ironlake_single_lvds_100m;
9013 else
9014 limit = &intel_limits_ironlake_single_lvds;
9015 }
9016 } else {
9017 limit = &intel_limits_ironlake_dac;
9018 }
9019
9020 if (!crtc_state->clock_set &&
9021 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9022 refclk, NULL, &crtc_state->dpll)) {
9023 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9024 return -EINVAL;
9025 }
9026
9027 ironlake_compute_dpll(crtc, crtc_state, NULL);
9028
9029 if (!intel_get_shared_dpll(crtc_state, NULL)) {
9030 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9031 pipe_name(crtc->pipe));
9032 return -EINVAL;
9033 }
9034
9035 return 0;
9036 }
9037
9038 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9039 struct intel_link_m_n *m_n)
9040 {
9041 struct drm_device *dev = crtc->base.dev;
9042 struct drm_i915_private *dev_priv = to_i915(dev);
9043 enum pipe pipe = crtc->pipe;
9044
9045 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9046 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9047 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9048 & ~TU_SIZE_MASK;
9049 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9050 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9051 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9052 }
9053
9054 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9055 enum transcoder transcoder,
9056 struct intel_link_m_n *m_n,
9057 struct intel_link_m_n *m2_n2)
9058 {
9059 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9060 enum pipe pipe = crtc->pipe;
9061
9062 if (INTEL_GEN(dev_priv) >= 5) {
9063 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9064 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9065 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9066 & ~TU_SIZE_MASK;
9067 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9068 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9069 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9070
9071 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9072 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9073 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9074 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9075 & ~TU_SIZE_MASK;
9076 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9077 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9078 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9079 }
9080 } else {
9081 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9082 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9083 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9084 & ~TU_SIZE_MASK;
9085 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9086 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9087 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9088 }
9089 }
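
/*
 * In the reads above the TU size shares the DATA_M register with the
 * data M value: the TU bits are masked out of gmch_m, and the hardware
 * stores the transfer unit size minus one, hence the +1.
 */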
9090
9091 void intel_dp_get_m_n(struct intel_crtc *crtc,
9092 struct intel_crtc_state *pipe_config)
9093 {
9094 if (pipe_config->has_pch_encoder)
9095 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9096 else
9097 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9098 &pipe_config->dp_m_n,
9099 &pipe_config->dp_m2_n2);
9100 }
9101
9102 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9103 struct intel_crtc_state *pipe_config)
9104 {
9105 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9106 &pipe_config->fdi_m_n, NULL);
9107 }
9108
9109 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9110 struct intel_crtc_state *pipe_config)
9111 {
9112 struct drm_device *dev = crtc->base.dev;
9113 struct drm_i915_private *dev_priv = to_i915(dev);
9114 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9115 u32 ps_ctrl = 0;
9116 int id = -1;
9117 int i;
9118
9119 /* find scaler attached to this pipe */
9120 for (i = 0; i < crtc->num_scalers; i++) {
9121 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9122 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9123 id = i;
9124 pipe_config->pch_pfit.enabled = true;
9125 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9126 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9127 scaler_state->scalers[i].in_use = true;
9128 break;
9129 }
9130 }
9131
9132 scaler_state->scaler_id = id;
9133 if (id >= 0) {
9134 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9135 } else {
9136 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9137 }
9138 }
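
/*
 * Note: an enabled scaler whose PS_PLANE_SEL field is non-zero is bound
 * to a plane rather than the pipe, which is why the loop above skips it
 * when looking for the panel fitter.
 */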
9139
9140 static void
9141 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9142 struct intel_initial_plane_config *plane_config)
9143 {
9144 struct drm_device *dev = crtc->base.dev;
9145 struct drm_i915_private *dev_priv = to_i915(dev);
9146 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9147 enum plane_id plane_id = plane->id;
9148 enum pipe pipe;
9149 u32 val, base, offset, stride_mult, tiling, alpha;
9150 int fourcc, pixel_format;
9151 unsigned int aligned_height;
9152 struct drm_framebuffer *fb;
9153 struct intel_framebuffer *intel_fb;
9154
9155 if (!plane->get_hw_state(plane, &pipe))
9156 return;
9157
9158 WARN_ON(pipe != crtc->pipe);
9159
9160 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9161 if (!intel_fb) {
9162 DRM_DEBUG_KMS("failed to alloc fb\n");
9163 return;
9164 }
9165
9166 fb = &intel_fb->base;
9167
9168 fb->dev = dev;
9169
9170 val = I915_READ(PLANE_CTL(pipe, plane_id));
9171
9172 if (INTEL_GEN(dev_priv) >= 11)
9173 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9174 else
9175 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9176
9177 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9178 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9179 alpha &= PLANE_COLOR_ALPHA_MASK;
9180 } else {
9181 alpha = val & PLANE_CTL_ALPHA_MASK;
9182 }
9183
9184 fourcc = skl_format_to_fourcc(pixel_format,
9185 val & PLANE_CTL_ORDER_RGBX, alpha);
9186 fb->format = drm_format_info(fourcc);
9187
9188 tiling = val & PLANE_CTL_TILED_MASK;
9189 switch (tiling) {
9190 case PLANE_CTL_TILED_LINEAR:
9191 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9192 break;
9193 case PLANE_CTL_TILED_X:
9194 plane_config->tiling = I915_TILING_X;
9195 fb->modifier = I915_FORMAT_MOD_X_TILED;
9196 break;
9197 case PLANE_CTL_TILED_Y:
9198 plane_config->tiling = I915_TILING_Y;
9199 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9200 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9201 else
9202 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9203 break;
9204 case PLANE_CTL_TILED_YF:
9205 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9206 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9207 else
9208 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9209 break;
9210 default:
9211 MISSING_CASE(tiling);
9212 goto error;
9213 }
9214
9215 /*
9216 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
9217 * while i915 HW rotation is clockwise; that's why we swap the values here.
9218 */
9219 switch (val & PLANE_CTL_ROTATE_MASK) {
9220 case PLANE_CTL_ROTATE_0:
9221 plane_config->rotation = DRM_MODE_ROTATE_0;
9222 break;
9223 case PLANE_CTL_ROTATE_90:
9224 plane_config->rotation = DRM_MODE_ROTATE_270;
9225 break;
9226 case PLANE_CTL_ROTATE_180:
9227 plane_config->rotation = DRM_MODE_ROTATE_180;
9228 break;
9229 case PLANE_CTL_ROTATE_270:
9230 plane_config->rotation = DRM_MODE_ROTATE_90;
9231 break;
9232 }
9233
9234 if (INTEL_GEN(dev_priv) >= 10 &&
9235 val & PLANE_CTL_FLIP_HORIZONTAL)
9236 plane_config->rotation |= DRM_MODE_REFLECT_X;
9237
9238 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9239 plane_config->base = base;
9240
9241 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9242
9243 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9244 fb->height = ((val >> 16) & 0xfff) + 1;
9245 fb->width = ((val >> 0) & 0x1fff) + 1;
9246
9247 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9248 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9249 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9250
9251 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9252
9253 plane_config->size = fb->pitches[0] * aligned_height;
9254
9255 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9256 crtc->base.name, plane->base.name, fb->width, fb->height,
9257 fb->format->cpp[0] * 8, base, fb->pitches[0],
9258 plane_config->size);
9259
9260 plane_config->fb = intel_fb;
9261 return;
9262
9263 error:
9264 kfree(intel_fb);
9265 }
9266
9267 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9268 struct intel_crtc_state *pipe_config)
9269 {
9270 struct drm_device *dev = crtc->base.dev;
9271 struct drm_i915_private *dev_priv = to_i915(dev);
9272 u32 tmp;
9273
9274 tmp = I915_READ(PF_CTL(crtc->pipe));
9275
9276 if (tmp & PF_ENABLE) {
9277 pipe_config->pch_pfit.enabled = true;
9278 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9279 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9280
9281 /* We currently do not free assignments of panel fitters on
9282 * ivb/hsw (since we don't use the higher upscaling modes which
9283 * differentiate them), so just WARN about this case for now. */
9284 if (IS_GEN(dev_priv, 7)) {
9285 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9286 PF_PIPE_SEL_IVB(crtc->pipe));
9287 }
9288 }
9289 }
9290
9291 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9292 struct intel_crtc_state *pipe_config)
9293 {
9294 struct drm_device *dev = crtc->base.dev;
9295 struct drm_i915_private *dev_priv = to_i915(dev);
9296 enum intel_display_power_domain power_domain;
9297 intel_wakeref_t wakeref;
9298 u32 tmp;
9299 bool ret;
9300
9301 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9302 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9303 if (!wakeref)
9304 return false;
9305
9306 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9307 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9308 pipe_config->shared_dpll = NULL;
9309
9310 ret = false;
9311 tmp = I915_READ(PIPECONF(crtc->pipe));
9312 if (!(tmp & PIPECONF_ENABLE))
9313 goto out;
9314
9315 switch (tmp & PIPECONF_BPC_MASK) {
9316 case PIPECONF_6BPC:
9317 pipe_config->pipe_bpp = 18;
9318 break;
9319 case PIPECONF_8BPC:
9320 pipe_config->pipe_bpp = 24;
9321 break;
9322 case PIPECONF_10BPC:
9323 pipe_config->pipe_bpp = 30;
9324 break;
9325 case PIPECONF_12BPC:
9326 pipe_config->pipe_bpp = 36;
9327 break;
9328 default:
9329 break;
9330 }
9331
9332 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9333 pipe_config->limited_color_range = true;
9334
9335 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9336 PIPECONF_GAMMA_MODE_SHIFT;
9337
9338 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9339
9340 i9xx_get_pipe_color_config(pipe_config);
9341
9342 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9343 struct intel_shared_dpll *pll;
9344 enum intel_dpll_id pll_id;
9345
9346 pipe_config->has_pch_encoder = true;
9347
9348 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9349 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9350 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9351
9352 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9353
9354 if (HAS_PCH_IBX(dev_priv)) {
9355 /*
9356 * The pipe->pch transcoder and pch transcoder->pll
9357 * mapping is fixed.
9358 */
9359 pll_id = (enum intel_dpll_id) crtc->pipe;
9360 } else {
9361 tmp = I915_READ(PCH_DPLL_SEL);
9362 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9363 pll_id = DPLL_ID_PCH_PLL_B;
9364 else
9365 pll_id = DPLL_ID_PCH_PLL_A;
9366 }
9367
9368 pipe_config->shared_dpll =
9369 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9370 pll = pipe_config->shared_dpll;
9371
9372 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9373 &pipe_config->dpll_hw_state));
9374
9375 tmp = pipe_config->dpll_hw_state.dpll;
9376 pipe_config->pixel_multiplier =
9377 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9378 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9379
9380 ironlake_pch_clock_get(crtc, pipe_config);
9381 } else {
9382 pipe_config->pixel_multiplier = 1;
9383 }
9384
9385 intel_get_pipe_timings(crtc, pipe_config);
9386 intel_get_pipe_src_size(crtc, pipe_config);
9387
9388 ironlake_get_pfit_config(crtc, pipe_config);
9389
9390 ret = true;
9391
9392 out:
9393 intel_display_power_put(dev_priv, power_domain, wakeref);
9394
9395 return ret;
9396 }
9397
9398 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9399 {
9400 struct drm_device *dev = &dev_priv->drm;
9401 struct intel_crtc *crtc;
9402
9403 for_each_intel_crtc(dev, crtc)
9404 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9405 pipe_name(crtc->pipe));
9406
9407 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
9408 "Display power well on\n");
9409 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9410 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9411 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9412 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
9413 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9414 "CPU PWM1 enabled\n");
9415 if (IS_HASWELL(dev_priv))
9416 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9417 "CPU PWM2 enabled\n");
9418 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9419 "PCH PWM1 enabled\n");
9420 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9421 "Utility pin enabled\n");
9422 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9423
9424 /*
9425 * In theory we can still leave IRQs enabled, as long as only the HPD
9426 * interrupts remain enabled. We used to check for that, but since it's
9427 * gen-specific and since we only disable LCPLL after we fully disable
9428 * the interrupts, the check below should be enough.
9429 */
9430 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9431 }
9432
9433 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
9434 {
9435 if (IS_HASWELL(dev_priv))
9436 return I915_READ(D_COMP_HSW);
9437 else
9438 return I915_READ(D_COMP_BDW);
9439 }
9440
9441 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
9442 {
9443 if (IS_HASWELL(dev_priv)) {
9444 mutex_lock(&dev_priv->pcu_lock);
9445 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9446 val))
9447 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9448 mutex_unlock(&dev_priv->pcu_lock);
9449 } else {
9450 I915_WRITE(D_COMP_BDW, val);
9451 POSTING_READ(D_COMP_BDW);
9452 }
9453 }
9454
9455 /*
9456 * This function implements pieces of two sequences from BSpec:
9457 * - Sequence for display software to disable LCPLL
9458 * - Sequence for display software to allow package C8+
9459 * The steps implemented here are just the steps that actually touch the LCPLL
9460 * register. Callers should take care of disabling all the display engine
9461 * functions, doing the mode unset, fixing interrupts, etc.
9462 */
9463 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9464 bool switch_to_fclk, bool allow_power_down)
9465 {
9466 u32 val;
9467
9468 assert_can_disable_lcpll(dev_priv);
9469
9470 val = I915_READ(LCPLL_CTL);
9471
9472 if (switch_to_fclk) {
9473 val |= LCPLL_CD_SOURCE_FCLK;
9474 I915_WRITE(LCPLL_CTL, val);
9475
9476 if (wait_for_us(I915_READ(LCPLL_CTL) &
9477 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9478 DRM_ERROR("Switching to FCLK failed\n");
9479
9480 val = I915_READ(LCPLL_CTL);
9481 }
9482
9483 val |= LCPLL_PLL_DISABLE;
9484 I915_WRITE(LCPLL_CTL, val);
9485 POSTING_READ(LCPLL_CTL);
9486
9487 if (intel_wait_for_register(&dev_priv->uncore,
9488 LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9489 DRM_ERROR("LCPLL still locked\n");
9490
9491 val = hsw_read_dcomp(dev_priv);
9492 val |= D_COMP_COMP_DISABLE;
9493 hsw_write_dcomp(dev_priv, val);
9494 ndelay(100);
9495
9496 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9497 1))
9498 DRM_ERROR("D_COMP RCOMP still in progress\n");
9499
9500 if (allow_power_down) {
9501 val = I915_READ(LCPLL_CTL);
9502 val |= LCPLL_POWER_DOWN_ALLOW;
9503 I915_WRITE(LCPLL_CTL, val);
9504 POSTING_READ(LCPLL_CTL);
9505 }
9506 }
9507
9508 /*
9509 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9510 * source.
9511 */
9512 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9513 {
9514 u32 val;
9515
9516 val = I915_READ(LCPLL_CTL);
9517
9518 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9519 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9520 return;
9521
9522 /*
9523 * Make sure we're not in the PC8 state before disabling PC8; otherwise
9524 * we'll hang the machine. To prevent entering PC8, just enable force_wake.
9525 */
9526 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
9527
9528 if (val & LCPLL_POWER_DOWN_ALLOW) {
9529 val &= ~LCPLL_POWER_DOWN_ALLOW;
9530 I915_WRITE(LCPLL_CTL, val);
9531 POSTING_READ(LCPLL_CTL);
9532 }
9533
9534 val = hsw_read_dcomp(dev_priv);
9535 val |= D_COMP_COMP_FORCE;
9536 val &= ~D_COMP_COMP_DISABLE;
9537 hsw_write_dcomp(dev_priv, val);
9538
9539 val = I915_READ(LCPLL_CTL);
9540 val &= ~LCPLL_PLL_DISABLE;
9541 I915_WRITE(LCPLL_CTL, val);
9542
9543 if (intel_wait_for_register(&dev_priv->uncore,
9544 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9545 5))
9546 DRM_ERROR("LCPLL not locked yet\n");
9547
9548 if (val & LCPLL_CD_SOURCE_FCLK) {
9549 val = I915_READ(LCPLL_CTL);
9550 val &= ~LCPLL_CD_SOURCE_FCLK;
9551 I915_WRITE(LCPLL_CTL, val);
9552
9553 if (wait_for_us((I915_READ(LCPLL_CTL) &
9554 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9555 DRM_ERROR("Switching back to LCPLL failed\n");
9556 }
9557
9558 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
9559
9560 intel_update_cdclk(dev_priv);
9561 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
9562 }
9563
9564 /*
9565 * Package states C8 and deeper are really deep PC states that can only be
9566 * reached when all the devices on the system allow it, so even if the graphics
9567 * device allows PC8+, it doesn't mean the system will actually get to these
9568 * states. Our driver only allows PC8+ when going into runtime PM.
9569 *
9570 * The requirements for PC8+ are that all the outputs are disabled, the power
9571 * well is disabled and most interrupts are disabled, and these are also
9572 * requirements for runtime PM. When these conditions are met, we manually
9573 * handle the rest: disable the interrupts and clocks, and switch the LCPLL
9574 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9575 * hang the machine.
9576 *
9577 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9578 * the state of some registers, so when we come back from PC8+ we need to
9579 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9580 * need to take care of the registers kept by RC6. Notice that this happens even
9581 * if we don't put the device in PCI D3 state (which is what currently happens
9582 * because of the runtime PM support).
9583 *
9584 * For more, read "Display Sequences for Package C8" in the hardware
9585 * documentation.
9586 */
9587 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9588 {
9589 u32 val;
9590
9591 DRM_DEBUG_KMS("Enabling package C8+\n");
9592
9593 if (HAS_PCH_LPT_LP(dev_priv)) {
9594 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9595 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9596 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9597 }
9598
9599 lpt_disable_clkout_dp(dev_priv);
9600 hsw_disable_lcpll(dev_priv, true, true);
9601 }
9602
9603 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9604 {
9605 u32 val;
9606
9607 DRM_DEBUG_KMS("Disabling package C8+\n");
9608
9609 hsw_restore_lcpll(dev_priv);
9610 lpt_init_pch_refclk(dev_priv);
9611
9612 if (HAS_PCH_LPT_LP(dev_priv)) {
9613 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9614 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9615 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9616 }
9617 }
9618
9619 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9620 struct intel_crtc_state *crtc_state)
9621 {
9622 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9623 struct intel_atomic_state *state =
9624 to_intel_atomic_state(crtc_state->base.state);
9625
9626 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9627 INTEL_GEN(dev_priv) >= 11) {
9628 struct intel_encoder *encoder =
9629 intel_get_crtc_new_encoder(state, crtc_state);
9630
9631 if (!intel_get_shared_dpll(crtc_state, encoder)) {
9632 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9633 pipe_name(crtc->pipe));
9634 return -EINVAL;
9635 }
9636 }
9637
9638 return 0;
9639 }
9640
9641 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9642 enum port port,
9643 struct intel_crtc_state *pipe_config)
9644 {
9645 enum intel_dpll_id id;
9646 u32 temp;
9647
9648 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9649 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9650
9651 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9652 return;
9653
9654 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9655 }
9656
9657 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9658 enum port port,
9659 struct intel_crtc_state *pipe_config)
9660 {
9661 enum intel_dpll_id id;
9662 u32 temp;
9663
9664 /* TODO: TBT pll not implemented. */
9665 if (intel_port_is_combophy(dev_priv, port)) {
9666 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9667 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9668 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9669 } else if (intel_port_is_tc(dev_priv, port)) {
9670 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9671 } else {
9672 WARN(1, "Invalid port %x\n", port);
9673 return;
9674 }
9675
9676 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9677 }
9678
9679 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9680 enum port port,
9681 struct intel_crtc_state *pipe_config)
9682 {
9683 enum intel_dpll_id id;
9684
9685 switch (port) {
9686 case PORT_A:
9687 id = DPLL_ID_SKL_DPLL0;
9688 break;
9689 case PORT_B:
9690 id = DPLL_ID_SKL_DPLL1;
9691 break;
9692 case PORT_C:
9693 id = DPLL_ID_SKL_DPLL2;
9694 break;
9695 default:
9696 DRM_ERROR("Incorrect port type\n");
9697 return;
9698 }
9699
9700 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9701 }
9702
9703 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9704 enum port port,
9705 struct intel_crtc_state *pipe_config)
9706 {
9707 enum intel_dpll_id id;
9708 u32 temp;
9709
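	/*
	 * Each DDI gets a 3-bit field in DPLL_CTRL2: a select-override
	 * bit at the bottom and two DPLL-select bits above it, hence
	 * the "port * 3 + 1" shift below (layout inferred from the
	 * CLK_SEL mask/shift macros).
	 */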
9710 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9711 id = temp >> (port * 3 + 1);
9712
9713 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9714 return;
9715
9716 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9717 }
9718
9719 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9720 enum port port,
9721 struct intel_crtc_state *pipe_config)
9722 {
9723 enum intel_dpll_id id;
9724 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9725
9726 switch (ddi_pll_sel) {
9727 case PORT_CLK_SEL_WRPLL1:
9728 id = DPLL_ID_WRPLL1;
9729 break;
9730 case PORT_CLK_SEL_WRPLL2:
9731 id = DPLL_ID_WRPLL2;
9732 break;
9733 case PORT_CLK_SEL_SPLL:
9734 id = DPLL_ID_SPLL;
9735 break;
9736 case PORT_CLK_SEL_LCPLL_810:
9737 id = DPLL_ID_LCPLL_810;
9738 break;
9739 case PORT_CLK_SEL_LCPLL_1350:
9740 id = DPLL_ID_LCPLL_1350;
9741 break;
9742 case PORT_CLK_SEL_LCPLL_2700:
9743 id = DPLL_ID_LCPLL_2700;
9744 break;
9745 default:
9746 MISSING_CASE(ddi_pll_sel);
9747 /* fall through */
9748 case PORT_CLK_SEL_NONE:
9749 return;
9750 }
9751
9752 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9753 }
9754
9755 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9756 struct intel_crtc_state *pipe_config,
9757 u64 *power_domain_mask)
9758 {
9759 struct drm_device *dev = crtc->base.dev;
9760 struct drm_i915_private *dev_priv = to_i915(dev);
9761 enum intel_display_power_domain power_domain;
9762 unsigned long panel_transcoder_mask = 0;
9763 unsigned long enabled_panel_transcoders = 0;
9764 enum transcoder panel_transcoder;
9765 u32 tmp;
9766
9767 if (INTEL_GEN(dev_priv) >= 11)
9768 panel_transcoder_mask |=
9769 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
9770
9771 if (HAS_TRANSCODER_EDP(dev_priv))
9772 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
9773
9774 /*
9775 * The pipe->transcoder mapping is fixed with the exception of the eDP
9776 * and DSI transcoders handled below.
9777 */
9778 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9779
9780 /*
9781 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9782 * consistency and less surprising code; it's in always-on power).
9783 */
9784 for_each_set_bit(panel_transcoder,
9785 &panel_transcoder_mask,
9786 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
9787 enum pipe trans_pipe;
9788
9789 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9790 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9791 continue;
9792
9793 /*
9794 * Log all enabled ones, but only use the first one.
9795 *
9796 * FIXME: This won't work for two separate DSI displays.
9797 */
9798 enabled_panel_transcoders |= BIT(panel_transcoder);
9799 if (enabled_panel_transcoders != BIT(panel_transcoder))
9800 continue;
9801
9802 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9803 default:
9804 WARN(1, "unknown pipe linked to transcoder %s\n",
9805 transcoder_name(panel_transcoder));
9806 /* fall through */
9807 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9808 case TRANS_DDI_EDP_INPUT_A_ON:
9809 trans_pipe = PIPE_A;
9810 break;
9811 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9812 trans_pipe = PIPE_B;
9813 break;
9814 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9815 trans_pipe = PIPE_C;
9816 break;
9817 }
9818
9819 if (trans_pipe == crtc->pipe)
9820 pipe_config->cpu_transcoder = panel_transcoder;
9821 }
9822
9823 /*
9824 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
9825 */
9826 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
9827 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
9828
9829 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9830 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9831 return false;
9832
9833 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
9834 *power_domain_mask |= BIT_ULL(power_domain);
9835
9836 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9837
9838 return tmp & PIPECONF_ENABLE;
9839 }
9840
9841 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9842 struct intel_crtc_state *pipe_config,
9843 u64 *power_domain_mask)
9844 {
9845 struct drm_device *dev = crtc->base.dev;
9846 struct drm_i915_private *dev_priv = to_i915(dev);
9847 enum intel_display_power_domain power_domain;
9848 enum port port;
9849 enum transcoder cpu_transcoder;
9850 u32 tmp;
9851
9852 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9853 if (port == PORT_A)
9854 cpu_transcoder = TRANSCODER_DSI_A;
9855 else
9856 cpu_transcoder = TRANSCODER_DSI_C;
9857
9858 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9859 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9860 continue;
9861
9862 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
9863 *power_domain_mask |= BIT_ULL(power_domain);
9864
9865 /*
9866 * The PLL needs to be enabled with a valid divider
9867 * configuration, otherwise accessing DSI registers will hang
9868 * the machine. See BSpec North Display Engine
9869 * registers/MIPI[BXT]. We can break out here early, since we
9870 * need the same DSI PLL to be enabled for both DSI ports.
9871 */
9872 if (!bxt_dsi_pll_is_enabled(dev_priv))
9873 break;
9874
9875 /* XXX: this works for video mode only */
9876 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9877 if (!(tmp & DPI_ENABLE))
9878 continue;
9879
9880 tmp = I915_READ(MIPI_CTRL(port));
9881 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9882 continue;
9883
9884 pipe_config->cpu_transcoder = cpu_transcoder;
9885 break;
9886 }
9887
9888 return transcoder_is_dsi(pipe_config->cpu_transcoder);
9889 }
9890
9891 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9892 struct intel_crtc_state *pipe_config)
9893 {
9894 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9895 struct intel_shared_dpll *pll;
9896 enum port port;
9897 u32 tmp;
9898
9899 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9900
9901 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9902
9903 if (INTEL_GEN(dev_priv) >= 11)
9904 icelake_get_ddi_pll(dev_priv, port, pipe_config);
9905 else if (IS_CANNONLAKE(dev_priv))
9906 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
9907 else if (IS_GEN9_BC(dev_priv))
9908 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9909 else if (IS_GEN9_LP(dev_priv))
9910 bxt_get_ddi_pll(dev_priv, port, pipe_config);
9911 else
9912 haswell_get_ddi_pll(dev_priv, port, pipe_config);
9913
9914 pll = pipe_config->shared_dpll;
9915 if (pll) {
9916 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9917 &pipe_config->dpll_hw_state));
9918 }
9919
9920 /*
9921 * Haswell has only FDI/PCH transcoder A, which is connected to
9922 * DDI E. So just check whether this pipe is wired to DDI E and whether
9923 * the PCH transcoder is on.
9924 */
9925 if (INTEL_GEN(dev_priv) < 9 &&
9926 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9927 pipe_config->has_pch_encoder = true;
9928
9929 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9930 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9931 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9932
9933 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9934 }
9935 }
9936
9937 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9938 struct intel_crtc_state *pipe_config)
9939 {
9940 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9941 enum intel_display_power_domain power_domain;
9942 u64 power_domain_mask;
9943 bool active;
9944
9945 intel_crtc_init_scalers(crtc, pipe_config);
9946
9947 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9948 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9949 return false;
9950 power_domain_mask = BIT_ULL(power_domain);
9951
9952 pipe_config->shared_dpll = NULL;
9953
9954 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
9955
9956 if (IS_GEN9_LP(dev_priv) &&
9957 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
9958 WARN_ON(active);
9959 active = true;
9960 }
9961
9962 if (!active)
9963 goto out;
9964
9965 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
9966 INTEL_GEN(dev_priv) >= 11) {
9967 haswell_get_ddi_port_state(crtc, pipe_config);
9968 intel_get_pipe_timings(crtc, pipe_config);
9969 }
9970
9971 intel_get_pipe_src_size(crtc, pipe_config);
9972 intel_get_crtc_ycbcr_config(crtc, pipe_config);
9973
9974 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
9975
9976 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9977
9978 if (INTEL_GEN(dev_priv) >= 9) {
9979 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
9980
9981 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
9982 pipe_config->gamma_enable = true;
9983
9984 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
9985 pipe_config->csc_enable = true;
9986 } else {
9987 i9xx_get_pipe_color_config(pipe_config);
9988 }
9989
9990 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9991 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9992 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
9993 power_domain_mask |= BIT_ULL(power_domain);
9994
9995 if (INTEL_GEN(dev_priv) >= 9)
9996 skylake_get_pfit_config(crtc, pipe_config);
9997 else
9998 ironlake_get_pfit_config(crtc, pipe_config);
9999 }
10000
10001 if (hsw_crtc_supports_ips(crtc)) {
10002 if (IS_HASWELL(dev_priv))
10003 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10004 else {
10005 /*
10006 * We cannot read out the IPS state on Broadwell; set it to
10007 * true so we can set it to a defined state on the first
10008 * commit.
10009 */
10010 pipe_config->ips_enabled = true;
10011 }
10012 }
10013
10014 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10015 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10016 pipe_config->pixel_multiplier =
10017 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10018 } else {
10019 pipe_config->pixel_multiplier = 1;
10020 }
10021
10022 out:
10023 for_each_power_domain(power_domain, power_domain_mask)
10024 intel_display_power_put_unchecked(dev_priv, power_domain);
10025
10026 return active;
10027 }
10028
10029 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10030 {
10031 struct drm_i915_private *dev_priv =
10032 to_i915(plane_state->base.plane->dev);
10033 const struct drm_framebuffer *fb = plane_state->base.fb;
10034 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10035 u32 base;
10036
10037 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10038 base = obj->phys_handle->busaddr;
10039 else
10040 base = intel_plane_ggtt_offset(plane_state);
10041
10042 base += plane_state->color_plane[0].offset;
10043
10044 /* ILK+ do this automagically */
10045 if (HAS_GMCH(dev_priv) &&
10046 plane_state->base.rotation & DRM_MODE_ROTATE_180)
10047 base += (plane_state->base.crtc_h *
10048 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10049
10050 return base;
10051 }
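
/*
 * Illustrative: for a 64x64 ARGB cursor (cpp = 4) rotated 180 degrees
 * on a GMCH platform, the base computed above advances by
 * (64 * 64 - 1) * 4 bytes to the last pixel, presumably because the
 * hardware then scans the buffer backwards.
 */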
10052
10053 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10054 {
10055 int x = plane_state->base.crtc_x;
10056 int y = plane_state->base.crtc_y;
10057 u32 pos = 0;
10058
10059 if (x < 0) {
10060 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10061 x = -x;
10062 }
10063 pos |= x << CURSOR_X_SHIFT;
10064
10065 if (y < 0) {
10066 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10067 y = -y;
10068 }
10069 pos |= y << CURSOR_Y_SHIFT;
10070
10071 return pos;
10072 }
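
/*
 * Illustrative: the encoding above is sign-magnitude rather than two's
 * complement, so crtc_x = -12, crtc_y = 5 becomes
 * pos = ((CURSOR_POS_SIGN | 12) << CURSOR_X_SHIFT) |
 *       (5 << CURSOR_Y_SHIFT).
 */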
10073
10074 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10075 {
10076 const struct drm_mode_config *config =
10077 &plane_state->base.plane->dev->mode_config;
10078 int width = plane_state->base.crtc_w;
10079 int height = plane_state->base.crtc_h;
10080
10081 return width > 0 && width <= config->cursor_width &&
10082 height > 0 && height <= config->cursor_height;
10083 }
10084
10085 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10086 {
10087 const struct drm_framebuffer *fb = plane_state->base.fb;
10088 unsigned int rotation = plane_state->base.rotation;
10089 int src_x, src_y;
10090 u32 offset;
10091 int ret;
10092
10093 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
10094 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
10095
10096 ret = intel_plane_check_stride(plane_state);
10097 if (ret)
10098 return ret;
10099
10100 src_x = plane_state->base.src_x >> 16;
10101 src_y = plane_state->base.src_y >> 16;
10102
10103 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10104 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10105 plane_state, 0);
10106
10107 if (src_x != 0 || src_y != 0) {
10108 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10109 return -EINVAL;
10110 }
10111
10112 plane_state->color_plane[0].offset = offset;
10113
10114 return 0;
10115 }
10116
10117 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10118 struct intel_plane_state *plane_state)
10119 {
10120 const struct drm_framebuffer *fb = plane_state->base.fb;
10121 int ret;
10122
10123 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10124 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10125 return -EINVAL;
10126 }
10127
10128 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10129 &crtc_state->base,
10130 DRM_PLANE_HELPER_NO_SCALING,
10131 DRM_PLANE_HELPER_NO_SCALING,
10132 true, true);
10133 if (ret)
10134 return ret;
10135
10136 if (!plane_state->base.visible)
10137 return 0;
10138
10139 ret = intel_plane_check_src_coordinates(plane_state);
10140 if (ret)
10141 return ret;
10142
10143 ret = intel_cursor_check_surface(plane_state);
10144 if (ret)
10145 return ret;
10146
10147 return 0;
10148 }
10149
10150 static unsigned int
10151 i845_cursor_max_stride(struct intel_plane *plane,
10152 u32 pixel_format, u64 modifier,
10153 unsigned int rotation)
10154 {
10155 return 2048;
10156 }
10157
10158 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10159 {
10160 u32 cntl = 0;
10161
10162 if (crtc_state->gamma_enable)
10163 cntl |= CURSOR_GAMMA_ENABLE;
10164
10165 return cntl;
10166 }
10167
10168 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10169 const struct intel_plane_state *plane_state)
10170 {
10171 return CURSOR_ENABLE |
10172 CURSOR_FORMAT_ARGB |
10173 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10174 }
10175
10176 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10177 {
10178 int width = plane_state->base.crtc_w;
10179
10180 /*
10181 * 845g/865g are only limited by the width of their cursors;
10182 * the height is arbitrary up to the precision of the register.
10183 */
10184 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10185 }
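
/*
 * Illustrative: a 64x200 cursor passes this check (width is a multiple
 * of 64, height bounded only by the mode_config limits), while 100x100
 * fails IS_ALIGNED(width, 64).
 */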
10186
10187 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10188 struct intel_plane_state *plane_state)
10189 {
10190 const struct drm_framebuffer *fb = plane_state->base.fb;
10191 int ret;
10192
10193 ret = intel_check_cursor(crtc_state, plane_state);
10194 if (ret)
10195 return ret;
10196
10197 /* if we want to turn off the cursor, ignore width and height */
10198 if (!fb)
10199 return 0;
10200
10201 /* Check for which cursor types we support */
10202 if (!i845_cursor_size_ok(plane_state)) {
10203 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10204 plane_state->base.crtc_w,
10205 plane_state->base.crtc_h);
10206 return -EINVAL;
10207 }
10208
10209 WARN_ON(plane_state->base.visible &&
10210 plane_state->color_plane[0].stride != fb->pitches[0]);
10211
10212 switch (fb->pitches[0]) {
10213 case 256:
10214 case 512:
10215 case 1024:
10216 case 2048:
10217 break;
10218 default:
10219 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10220 fb->pitches[0]);
10221 return -EINVAL;
10222 }
10223
10224 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10225
10226 return 0;
10227 }
10228
10229 static void i845_update_cursor(struct intel_plane *plane,
10230 const struct intel_crtc_state *crtc_state,
10231 const struct intel_plane_state *plane_state)
10232 {
10233 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10234 u32 cntl = 0, base = 0, pos = 0, size = 0;
10235 unsigned long irqflags;
10236
10237 if (plane_state && plane_state->base.visible) {
10238 unsigned int width = plane_state->base.crtc_w;
10239 unsigned int height = plane_state->base.crtc_h;
10240
10241 cntl = plane_state->ctl |
10242 i845_cursor_ctl_crtc(crtc_state);
10243
10244 size = (height << 12) | width;
10245
10246 base = intel_cursor_base(plane_state);
10247 pos = intel_cursor_position(plane_state);
10248 }
10249
10250 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10251
10252 /* On these chipsets we can only modify the base/size/stride
10253 * whilst the cursor is disabled.
10254 */
10255 if (plane->cursor.base != base ||
10256 plane->cursor.size != size ||
10257 plane->cursor.cntl != cntl) {
10258 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10259 I915_WRITE_FW(CURBASE(PIPE_A), base);
10260 I915_WRITE_FW(CURSIZE, size);
10261 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10262 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10263
10264 plane->cursor.base = base;
10265 plane->cursor.size = size;
10266 plane->cursor.cntl = cntl;
10267 } else {
10268 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10269 }
10270
10271 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10272 }
10273
10274 static void i845_disable_cursor(struct intel_plane *plane,
10275 const struct intel_crtc_state *crtc_state)
10276 {
10277 i845_update_cursor(plane, crtc_state, NULL);
10278 }
10279
10280 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10281 enum pipe *pipe)
10282 {
10283 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10284 enum intel_display_power_domain power_domain;
10285 intel_wakeref_t wakeref;
10286 bool ret;
10287
10288 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10289 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10290 if (!wakeref)
10291 return false;
10292
10293 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10294
10295 *pipe = PIPE_A;
10296
10297 intel_display_power_put(dev_priv, power_domain, wakeref);
10298
10299 return ret;
10300 }
10301
10302 static unsigned int
10303 i9xx_cursor_max_stride(struct intel_plane *plane,
10304 u32 pixel_format, u64 modifier,
10305 unsigned int rotation)
10306 {
10307 return plane->base.dev->mode_config.cursor_width * 4;
10308 }
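
/* i.e. the widest supported cursor times 4 bytes per ARGB pixel */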
10309
10310 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10311 {
10312 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10313 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10314 u32 cntl = 0;
10315
10316 if (INTEL_GEN(dev_priv) >= 11)
10317 return cntl;
10318
10319 if (crtc_state->gamma_enable)
10320 cntl = MCURSOR_GAMMA_ENABLE;
10321
10322 if (crtc_state->csc_enable)
10323 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10324
10325 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10326 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10327
10328 return cntl;
10329 }
10330
10331 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10332 const struct intel_plane_state *plane_state)
10333 {
10334 struct drm_i915_private *dev_priv =
10335 to_i915(plane_state->base.plane->dev);
10336 u32 cntl = 0;
10337
10338 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10339 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10340
10341 switch (plane_state->base.crtc_w) {
10342 case 64:
10343 cntl |= MCURSOR_MODE_64_ARGB_AX;
10344 break;
10345 case 128:
10346 cntl |= MCURSOR_MODE_128_ARGB_AX;
10347 break;
10348 case 256:
10349 cntl |= MCURSOR_MODE_256_ARGB_AX;
10350 break;
10351 default:
10352 MISSING_CASE(plane_state->base.crtc_w);
10353 return 0;
10354 }
10355
10356 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10357 cntl |= MCURSOR_ROTATE_180;
10358
10359 return cntl;
10360 }
10361
10362 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10363 {
10364 struct drm_i915_private *dev_priv =
10365 to_i915(plane_state->base.plane->dev);
10366 int width = plane_state->base.crtc_w;
10367 int height = plane_state->base.crtc_h;
10368
10369 if (!intel_cursor_size_ok(plane_state))
10370 return false;
10371
10372 /* Cursor width is limited to a few power-of-two sizes */
10373 switch (width) {
10374 case 256:
10375 case 128:
10376 case 64:
10377 break;
10378 default:
10379 return false;
10380 }
10381
10382 /*
10383 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10384 * height from 8 lines up to the cursor width, when the
10385 * cursor is not rotated. Everything else requires square
10386 * cursors.
10387 */
10388 if (HAS_CUR_FBC(dev_priv) &&
10389 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10390 if (height < 8 || height > width)
10391 return false;
10392 } else {
10393 if (height != width)
10394 return false;
10395 }
10396
10397 return true;
10398 }
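
/*
 * Illustrative: with CUR_FBC an unrotated 256x64 cursor is accepted
 * (height anywhere from 8 lines up to the width), while a 180-degree
 * rotated cursor must be square, e.g. 256x256.
 */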
10399
10400 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10401 struct intel_plane_state *plane_state)
10402 {
10403 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10404 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10405 const struct drm_framebuffer *fb = plane_state->base.fb;
10406 enum pipe pipe = plane->pipe;
10407 int ret;
10408
10409 ret = intel_check_cursor(crtc_state, plane_state);
10410 if (ret)
10411 return ret;
10412
10413 /* if we want to turn off the cursor, ignore width and height */
10414 if (!fb)
10415 return 0;
10416
10417 /* Check for which cursor types we support */
10418 if (!i9xx_cursor_size_ok(plane_state)) {
10419 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10420 plane_state->base.crtc_w,
10421 plane_state->base.crtc_h);
10422 return -EINVAL;
10423 }
10424
10425 WARN_ON(plane_state->base.visible &&
10426 plane_state->color_plane[0].stride != fb->pitches[0]);
10427
10428 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10429 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10430 fb->pitches[0], plane_state->base.crtc_w);
10431 return -EINVAL;
10432 }
10433
10434 /*
10435 * There's something wrong with the cursor on CHV pipe C.
10436 * If it straddles the left edge of the screen then
10437 * moving it away from the edge or disabling it often
10438 * results in a pipe underrun, and often that can lead to a
10439 * dead pipe (constant underrun reported, and it scans
10440 * out just a solid color). To recover from that, the
10441 * display power well must be turned off and on again.
10442 * Refuse to put the cursor into that compromised position.
10443 */
10444 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10445 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10446 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10447 return -EINVAL;
10448 }
10449
10450 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10451
10452 return 0;
10453 }
10454
10455 static void i9xx_update_cursor(struct intel_plane *plane,
10456 const struct intel_crtc_state *crtc_state,
10457 const struct intel_plane_state *plane_state)
10458 {
10459 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10460 enum pipe pipe = plane->pipe;
10461 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10462 unsigned long irqflags;
10463
10464 if (plane_state && plane_state->base.visible) {
10465 cntl = plane_state->ctl |
10466 i9xx_cursor_ctl_crtc(crtc_state);
10467
10468 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10469 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10470
10471 base = intel_cursor_base(plane_state);
10472 pos = intel_cursor_position(plane_state);
10473 }
10474
10475 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10476
10477 /*
10478 * On some platforms writing CURCNTR first will also
10479 * cause CURPOS to be armed by the CURBASE write.
10480 * Without the CURCNTR write the CURPOS write would
10481 * arm itself. Thus we always update CURCNTR before
10482 * CURPOS.
10483 *
10484 * On other platforms CURPOS always requires the
10485 * CURBASE write to arm the update. Additionally,
10486 * a write to any of the cursor registers will cancel
10487 * an already armed cursor update. Thus leaving out
10488 * the CURBASE write after CURPOS could lead to a
10489 * cursor that doesn't appear to move, or even change
10490 * shape. Thus we always write CURBASE.
10491 *
10492 * The other registers are armed by the CURBASE write,
10493 * except when the plane is getting enabled, at which time
10494 * the CURCNTR write arms the update.
10495 */
10496
10497 if (INTEL_GEN(dev_priv) >= 9)
10498 skl_write_cursor_wm(plane, crtc_state);
10499
10500 if (plane->cursor.base != base ||
10501 plane->cursor.size != fbc_ctl ||
10502 plane->cursor.cntl != cntl) {
10503 if (HAS_CUR_FBC(dev_priv))
10504 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10505 I915_WRITE_FW(CURCNTR(pipe), cntl);
10506 I915_WRITE_FW(CURPOS(pipe), pos);
10507 I915_WRITE_FW(CURBASE(pipe), base);
10508
10509 plane->cursor.base = base;
10510 plane->cursor.size = fbc_ctl;
10511 plane->cursor.cntl = cntl;
10512 } else {
10513 I915_WRITE_FW(CURPOS(pipe), pos);
10514 I915_WRITE_FW(CURBASE(pipe), base);
10515 }
10516
10517 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10518 }
10519
10520 static void i9xx_disable_cursor(struct intel_plane *plane,
10521 const struct intel_crtc_state *crtc_state)
10522 {
10523 i9xx_update_cursor(plane, crtc_state, NULL);
10524 }
10525
10526 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10527 enum pipe *pipe)
10528 {
10529 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10530 enum intel_display_power_domain power_domain;
10531 intel_wakeref_t wakeref;
10532 bool ret;
10533 u32 val;
10534
10535 /*
10536 * Not 100% correct for planes that can move between pipes,
10537 * but that's only the case for gen2-3, which don't have any
10538 * display power wells.
10539 */
10540 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10541 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10542 if (!wakeref)
10543 return false;
10544
10545 val = I915_READ(CURCNTR(plane->pipe));
10546
10547 ret = val & MCURSOR_MODE;
10548
10549 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10550 *pipe = plane->pipe;
10551 else
10552 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10553 MCURSOR_PIPE_SELECT_SHIFT;
10554
10555 intel_display_power_put(dev_priv, power_domain, wakeref);
10556
10557 return ret;
10558 }
10559
10560 /* VESA 640x480x72Hz mode to set on the pipe */
10561 static const struct drm_display_mode load_detect_mode = {
10562 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10563 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10564 };
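
/*
 * Sanity check of the timings above: a 31500 kHz dotclock over an
 * 832x520 total gives 31500000 / (832 * 520) ~= 72.8 Hz, matching the
 * advertised 72 Hz VESA mode.
 */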
10565
10566 struct drm_framebuffer *
10567 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10568 struct drm_mode_fb_cmd2 *mode_cmd)
10569 {
10570 struct intel_framebuffer *intel_fb;
10571 int ret;
10572
10573 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10574 if (!intel_fb)
10575 return ERR_PTR(-ENOMEM);
10576
10577 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10578 if (ret)
10579 goto err;
10580
10581 return &intel_fb->base;
10582
10583 err:
10584 kfree(intel_fb);
10585 return ERR_PTR(ret);
10586 }
10587
10588 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10589 struct drm_crtc *crtc)
10590 {
10591 struct drm_plane *plane;
10592 struct drm_plane_state *plane_state;
10593 int ret, i;
10594
10595 ret = drm_atomic_add_affected_planes(state, crtc);
10596 if (ret)
10597 return ret;
10598
10599 for_each_new_plane_in_state(state, plane, plane_state, i) {
10600 if (plane_state->crtc != crtc)
10601 continue;
10602
10603 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10604 if (ret)
10605 return ret;
10606
10607 drm_atomic_set_fb_for_plane(plane_state, NULL);
10608 }
10609
10610 return 0;
10611 }
10612
10613 int intel_get_load_detect_pipe(struct drm_connector *connector,
10614 const struct drm_display_mode *mode,
10615 struct intel_load_detect_pipe *old,
10616 struct drm_modeset_acquire_ctx *ctx)
10617 {
10618 struct intel_crtc *intel_crtc;
10619 struct intel_encoder *intel_encoder =
10620 intel_attached_encoder(connector);
10621 struct drm_crtc *possible_crtc;
10622 struct drm_encoder *encoder = &intel_encoder->base;
10623 struct drm_crtc *crtc = NULL;
10624 struct drm_device *dev = encoder->dev;
10625 struct drm_i915_private *dev_priv = to_i915(dev);
10626 struct drm_mode_config *config = &dev->mode_config;
10627 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10628 struct drm_connector_state *connector_state;
10629 struct intel_crtc_state *crtc_state;
10630 int ret, i = -1;
10631
10632 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10633 connector->base.id, connector->name,
10634 encoder->base.id, encoder->name);
10635
10636 old->restore_state = NULL;
10637
10638 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10639
10640 /*
10641 * Algorithm gets a little messy:
10642 *
10643 * - if the connector already has an assigned crtc, use it (but make
10644 * sure it's on first)
10645 *
10646 * - try to find the first unused crtc that can drive this connector,
10647 * and use that if we find one
10648 */
10649
10650 /* See if we already have a CRTC for this connector */
10651 if (connector->state->crtc) {
10652 crtc = connector->state->crtc;
10653
10654 ret = drm_modeset_lock(&crtc->mutex, ctx);
10655 if (ret)
10656 goto fail;
10657
10658 /* Make sure the crtc and connector are running */
10659 goto found;
10660 }
10661
10662 /* Find an unused one (if possible) */
10663 for_each_crtc(dev, possible_crtc) {
10664 i++;
10665 if (!(encoder->possible_crtcs & (1 << i)))
10666 continue;
10667
10668 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10669 if (ret)
10670 goto fail;
10671
10672 if (possible_crtc->state->enable) {
10673 drm_modeset_unlock(&possible_crtc->mutex);
10674 continue;
10675 }
10676
10677 crtc = possible_crtc;
10678 break;
10679 }
10680
10681 /*
10682 * If we didn't find an unused CRTC, don't use any.
10683 */
10684 if (!crtc) {
10685 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10686 ret = -ENODEV;
10687 goto fail;
10688 }
10689
10690 found:
10691 intel_crtc = to_intel_crtc(crtc);
10692
10693 state = drm_atomic_state_alloc(dev);
10694 restore_state = drm_atomic_state_alloc(dev);
10695 if (!state || !restore_state) {
10696 ret = -ENOMEM;
10697 goto fail;
10698 }
10699
10700 state->acquire_ctx = ctx;
10701 restore_state->acquire_ctx = ctx;
10702
10703 connector_state = drm_atomic_get_connector_state(state, connector);
10704 if (IS_ERR(connector_state)) {
10705 ret = PTR_ERR(connector_state);
10706 goto fail;
10707 }
10708
10709 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10710 if (ret)
10711 goto fail;
10712
10713 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10714 if (IS_ERR(crtc_state)) {
10715 ret = PTR_ERR(crtc_state);
10716 goto fail;
10717 }
10718
10719 crtc_state->base.active = crtc_state->base.enable = true;
10720
10721 if (!mode)
10722 mode = &load_detect_mode;
10723
10724 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10725 if (ret)
10726 goto fail;
10727
10728 ret = intel_modeset_disable_planes(state, crtc);
10729 if (ret)
10730 goto fail;
10731
10732 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10733 if (!ret)
10734 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10735 if (!ret)
10736 ret = drm_atomic_add_affected_planes(restore_state, crtc);
10737 if (ret) {
10738 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10739 goto fail;
10740 }
10741
10742 ret = drm_atomic_commit(state);
10743 if (ret) {
10744 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10745 goto fail;
10746 }
10747
10748 old->restore_state = restore_state;
10749 drm_atomic_state_put(state);
10750
10751 /* let the connector get through one full cycle before testing */
10752 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
10753 return true;
10754
10755 fail:
10756 if (state) {
10757 drm_atomic_state_put(state);
10758 state = NULL;
10759 }
10760 if (restore_state) {
10761 drm_atomic_state_put(restore_state);
10762 restore_state = NULL;
10763 }
10764
10765 if (ret == -EDEADLK)
10766 return ret;
10767
10768 return false;
10769 }
10770
10771 void intel_release_load_detect_pipe(struct drm_connector *connector,
10772 struct intel_load_detect_pipe *old,
10773 struct drm_modeset_acquire_ctx *ctx)
10774 {
10775 struct intel_encoder *intel_encoder =
10776 intel_attached_encoder(connector);
10777 struct drm_encoder *encoder = &intel_encoder->base;
10778 struct drm_atomic_state *state = old->restore_state;
10779 int ret;
10780
10781 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10782 connector->base.id, connector->name,
10783 encoder->base.id, encoder->name);
10784
10785 if (!state)
10786 return;
10787
10788 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10789 if (ret)
10790 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10791 drm_atomic_state_put(state);
10792 }
10793
10794 static int i9xx_pll_refclk(struct drm_device *dev,
10795 const struct intel_crtc_state *pipe_config)
10796 {
10797 struct drm_i915_private *dev_priv = to_i915(dev);
10798 u32 dpll = pipe_config->dpll_hw_state.dpll;
10799
10800 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10801 return dev_priv->vbt.lvds_ssc_freq;
10802 else if (HAS_PCH_SPLIT(dev_priv))
10803 return 120000;
10804 else if (!IS_GEN(dev_priv, 2))
10805 return 96000;
10806 else
10807 return 48000;
10808 }
10809
10810 /* Returns the clock of the currently programmed mode of the given pipe. */
10811 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10812 struct intel_crtc_state *pipe_config)
10813 {
10814 struct drm_device *dev = crtc->base.dev;
10815 struct drm_i915_private *dev_priv = to_i915(dev);
10816 int pipe = pipe_config->cpu_transcoder;
10817 u32 dpll = pipe_config->dpll_hw_state.dpll;
10818 u32 fp;
10819 struct dpll clock;
10820 int port_clock;
10821 int refclk = i9xx_pll_refclk(dev, pipe_config);
10822
10823 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10824 fp = pipe_config->dpll_hw_state.fp0;
10825 else
10826 fp = pipe_config->dpll_hw_state.fp1;
10827
10828 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10829 if (IS_PINEVIEW(dev_priv)) {
10830 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10831 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10832 } else {
10833 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10834 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10835 }
10836
10837 if (!IS_GEN(dev_priv, 2)) {
10838 if (IS_PINEVIEW(dev_priv))
10839 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10840 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10841 else
10842 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10843 DPLL_FPA01_P1_POST_DIV_SHIFT);
10844
10845 switch (dpll & DPLL_MODE_MASK) {
10846 case DPLLB_MODE_DAC_SERIAL:
10847 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10848 5 : 10;
10849 break;
10850 case DPLLB_MODE_LVDS:
10851 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10852 7 : 14;
10853 break;
10854 default:
10855 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10856 "mode\n", (int)(dpll & DPLL_MODE_MASK));
10857 return;
10858 }
10859
10860 if (IS_PINEVIEW(dev_priv))
10861 port_clock = pnv_calc_dpll_params(refclk, &clock);
10862 else
10863 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10864 } else {
10865 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
10866 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10867
10868 if (is_lvds) {
10869 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10870 DPLL_FPA01_P1_POST_DIV_SHIFT);
10871
10872 if (lvds & LVDS_CLKB_POWER_UP)
10873 clock.p2 = 7;
10874 else
10875 clock.p2 = 14;
10876 } else {
10877 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10878 clock.p1 = 2;
10879 else {
10880 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10881 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10882 }
10883 if (dpll & PLL_P2_DIVIDE_BY_4)
10884 clock.p2 = 4;
10885 else
10886 clock.p2 = 2;
10887 }
10888
10889 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10890 }
10891
10892 /*
10893 * This value includes pixel_multiplier. We will use
10894 * port_clock to compute adjusted_mode.crtc_clock in the
10895 * encoder's get_config() function.
10896 */
10897 pipe_config->port_clock = port_clock;
10898 }
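
/*
 * Illustrative example (made-up divider values, not real hardware
 * state): with refclk = 96000 kHz and decoded m1 = 12, m2 = 8, n = 3,
 * p1 = 2, p2 = 10, i9xx_calc_dpll_params() computes something like
 * m = 5 * (m1 + 2) + (m2 + 2) = 80, vco = 96000 * 80 / (3 + 2) =
 * 1536000 kHz, dot = vco / (p1 * p2) = 76800 kHz, and that dot clock
 * is what lands in port_clock above.
 */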
10899
10900 int intel_dotclock_calculate(int link_freq,
10901 const struct intel_link_m_n *m_n)
10902 {
10903 /*
10904 * The calculation for the data clock is:
10905 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10906 	 * But we want to avoid losing precision if possible, so:
10907 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10908 	 *
10909 	 * and the link clock relation is simpler:
10910 	 * pixel_clock = (link_m * link_clock) / link_n
10911 */
10912
10913 if (!m_n->link_n)
10914 return 0;
10915
10916 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10917 }
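
/*
 * Example: a 148500 kHz pixel clock carried on an HBR link
 * (link_freq = 270000 kHz) could be described by link_m/link_n =
 * 11/20, since 270000 * 11 / 20 = 148500. The mul_u32_u32()/div_u64()
 * pairing keeps the intermediate link_m * link_freq product in 64
 * bits so it cannot overflow.
 */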
10918
10919 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10920 struct intel_crtc_state *pipe_config)
10921 {
10922 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10923
10924 /* read out port_clock from the DPLL */
10925 i9xx_crtc_clock_get(crtc, pipe_config);
10926
10927 /*
10928 * In case there is an active pipe without active ports,
10929 	 * we may still need an estimate of the dotclock anyway.
10930 * Calculate one based on the FDI configuration.
10931 */
10932 pipe_config->base.adjusted_mode.crtc_clock =
10933 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10934 &pipe_config->fdi_m_n);
10935 }
10936
10937 /* Returns the currently programmed mode of the given encoder. */
10938 struct drm_display_mode *
10939 intel_encoder_current_mode(struct intel_encoder *encoder)
10940 {
10941 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10942 struct intel_crtc_state *crtc_state;
10943 struct drm_display_mode *mode;
10944 struct intel_crtc *crtc;
10945 enum pipe pipe;
10946
10947 if (!encoder->get_hw_state(encoder, &pipe))
10948 return NULL;
10949
10950 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10951
10952 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10953 if (!mode)
10954 return NULL;
10955
10956 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10957 if (!crtc_state) {
10958 kfree(mode);
10959 return NULL;
10960 }
10961
10962 crtc_state->base.crtc = &crtc->base;
10963
10964 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10965 kfree(crtc_state);
10966 kfree(mode);
10967 return NULL;
10968 }
10969
10970 encoder->get_config(encoder, crtc_state);
10971
10972 intel_mode_from_pipe_config(mode, crtc_state);
10973
10974 kfree(crtc_state);
10975
10976 return mode;
10977 }
10978
10979 static void intel_crtc_destroy(struct drm_crtc *crtc)
10980 {
10981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10982
10983 drm_crtc_cleanup(crtc);
10984 kfree(intel_crtc);
10985 }
10986
10987 /**
10988 * intel_wm_need_update - Check whether watermarks need updating
10989 * @cur: current plane state
10990 * @new: new plane state
10991 *
10992 * Check current plane state versus the new one to determine whether
10993 * watermarks need to be recalculated.
10994 *
10995 	 * Returns: true if the watermarks need to be recalculated, false otherwise.
10996 */
10997 static bool intel_wm_need_update(struct intel_plane_state *cur,
10998 struct intel_plane_state *new)
10999 {
11000 	/* Update watermarks on visibility, tiling, rotation or size changes. */
11001 if (new->base.visible != cur->base.visible)
11002 return true;
11003
11004 if (!cur->base.fb || !new->base.fb)
11005 return false;
11006
11007 if (cur->base.fb->modifier != new->base.fb->modifier ||
11008 cur->base.rotation != new->base.rotation ||
11009 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11010 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11011 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11012 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11013 return true;
11014
11015 return false;
11016 }
11017
11018 static bool needs_scaling(const struct intel_plane_state *state)
11019 {
11020 int src_w = drm_rect_width(&state->base.src) >> 16;
11021 int src_h = drm_rect_height(&state->base.src) >> 16;
11022 int dst_w = drm_rect_width(&state->base.dst);
11023 int dst_h = drm_rect_height(&state->base.dst);
11024
11025 return (src_w != dst_w || src_h != dst_h);
11026 }
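
/*
 * Note the >> 16 above: plane source rectangles are in 16.16 fixed
 * point while destination rectangles are in whole pixels, so e.g. a
 * source width of 1920 << 16 shifts down to 1920. Scaling is needed
 * exactly when the integer source size differs from the destination
 * size.
 */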
11027
11028 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11029 struct drm_crtc_state *crtc_state,
11030 const struct intel_plane_state *old_plane_state,
11031 struct drm_plane_state *plane_state)
11032 {
11033 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11034 struct drm_crtc *crtc = crtc_state->crtc;
11035 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11036 struct intel_plane *plane = to_intel_plane(plane_state->plane);
11037 struct drm_device *dev = crtc->dev;
11038 struct drm_i915_private *dev_priv = to_i915(dev);
11039 bool mode_changed = needs_modeset(crtc_state);
11040 bool was_crtc_enabled = old_crtc_state->base.active;
11041 bool is_crtc_enabled = crtc_state->active;
11042 bool turn_off, turn_on, visible, was_visible;
11043 struct drm_framebuffer *fb = plane_state->fb;
11044 int ret;
11045
11046 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11047 ret = skl_update_scaler_plane(
11048 to_intel_crtc_state(crtc_state),
11049 to_intel_plane_state(plane_state));
11050 if (ret)
11051 return ret;
11052 }
11053
11054 was_visible = old_plane_state->base.visible;
11055 visible = plane_state->visible;
11056
11057 if (!was_crtc_enabled && WARN_ON(was_visible))
11058 was_visible = false;
11059
11060 /*
11061 * Visibility is calculated as if the crtc was on, but
11062 * after scaler setup everything depends on it being off
11063 * when the crtc isn't active.
11064 *
11065 * FIXME this is wrong for watermarks. Watermarks should also
11066 * be computed as if the pipe would be active. Perhaps move
11067 * per-plane wm computation to the .check_plane() hook, and
11068 * only combine the results from all planes in the current place?
11069 */
11070 if (!is_crtc_enabled) {
11071 plane_state->visible = visible = false;
11072 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
11073 }
11074
11075 if (!was_visible && !visible)
11076 return 0;
11077
11078 if (fb != old_plane_state->base.fb)
11079 pipe_config->fb_changed = true;
11080
11081 turn_off = was_visible && (!visible || mode_changed);
11082 turn_on = visible && (!was_visible || mode_changed);
11083
11084 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11085 intel_crtc->base.base.id, intel_crtc->base.name,
11086 plane->base.base.id, plane->base.name,
11087 fb ? fb->base.id : -1);
11088
11089 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11090 plane->base.base.id, plane->base.name,
11091 was_visible, visible,
11092 turn_off, turn_on, mode_changed);
11093
11094 if (turn_on) {
11095 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11096 pipe_config->update_wm_pre = true;
11097
11098 /* must disable cxsr around plane enable/disable */
11099 if (plane->id != PLANE_CURSOR)
11100 pipe_config->disable_cxsr = true;
11101 } else if (turn_off) {
11102 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11103 pipe_config->update_wm_post = true;
11104
11105 /* must disable cxsr around plane enable/disable */
11106 if (plane->id != PLANE_CURSOR)
11107 pipe_config->disable_cxsr = true;
11108 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11109 to_intel_plane_state(plane_state))) {
11110 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11111 /* FIXME bollocks */
11112 pipe_config->update_wm_pre = true;
11113 pipe_config->update_wm_post = true;
11114 }
11115 }
11116
11117 if (visible || was_visible)
11118 pipe_config->fb_bits |= plane->frontbuffer_bit;
11119
11120 /*
11121 * ILK/SNB DVSACNTR/Sprite Enable
11122 * IVB SPR_CTL/Sprite Enable
11123 * "When in Self Refresh Big FIFO mode, a write to enable the
11124 * plane will be internally buffered and delayed while Big FIFO
11125 * mode is exiting."
11126 *
11127 * Which means that enabling the sprite can take an extra frame
11128 * when we start in big FIFO mode (LP1+). Thus we need to drop
11129 * down to LP0 and wait for vblank in order to make sure the
11130 * sprite gets enabled on the next vblank after the register write.
11131 * Doing otherwise would risk enabling the sprite one frame after
11132 * we've already signalled flip completion. We can resume LP1+
11133 * once the sprite has been enabled.
11134 *
11135 *
11136 * WaCxSRDisabledForSpriteScaling:ivb
11137 * IVB SPR_SCALE/Scaling Enable
11138 * "Low Power watermarks must be disabled for at least one
11139 * frame before enabling sprite scaling, and kept disabled
11140 * until sprite scaling is disabled."
11141 *
11142 * ILK/SNB DVSASCALE/Scaling Enable
11143 * "When in Self Refresh Big FIFO mode, scaling enable will be
11144 * masked off while Big FIFO mode is exiting."
11145 *
11146 * Despite the w/a only being listed for IVB we assume that
11147 * the ILK/SNB note has similar ramifications, hence we apply
11148 * the w/a on all three platforms.
11149 *
11150 	 * Experimental results suggest this is needed for the primary
11151 	 * plane as well, not just the sprite plane.
11152 */
11153 if (plane->id != PLANE_CURSOR &&
11154 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11155 IS_IVYBRIDGE(dev_priv)) &&
11156 (turn_on || (!needs_scaling(old_plane_state) &&
11157 needs_scaling(to_intel_plane_state(plane_state)))))
11158 pipe_config->disable_lp_wm = true;
11159
11160 return 0;
11161 }
11162
11163 static bool encoders_cloneable(const struct intel_encoder *a,
11164 const struct intel_encoder *b)
11165 {
11166 /* masks could be asymmetric, so check both ways */
11167 return a == b || (a->cloneable & (1 << b->type) &&
11168 b->cloneable & (1 << a->type));
11169 }
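
/*
 * Example: if encoder A lists B's type in its cloneable mask but B
 * does not list A's type, the b->cloneable check fails and the pair
 * is rejected; both encoders must opt in for cloning to be allowed.
 */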
11170
11171 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11172 struct intel_crtc *crtc,
11173 struct intel_encoder *encoder)
11174 {
11175 struct intel_encoder *source_encoder;
11176 struct drm_connector *connector;
11177 struct drm_connector_state *connector_state;
11178 int i;
11179
11180 for_each_new_connector_in_state(state, connector, connector_state, i) {
11181 if (connector_state->crtc != &crtc->base)
11182 continue;
11183
11184 source_encoder =
11185 to_intel_encoder(connector_state->best_encoder);
11186 if (!encoders_cloneable(encoder, source_encoder))
11187 return false;
11188 }
11189
11190 return true;
11191 }
11192
11193 static int icl_add_linked_planes(struct intel_atomic_state *state)
11194 {
11195 struct intel_plane *plane, *linked;
11196 struct intel_plane_state *plane_state, *linked_plane_state;
11197 int i;
11198
11199 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11200 linked = plane_state->linked_plane;
11201
11202 if (!linked)
11203 continue;
11204
11205 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11206 if (IS_ERR(linked_plane_state))
11207 return PTR_ERR(linked_plane_state);
11208
11209 WARN_ON(linked_plane_state->linked_plane != plane);
11210 WARN_ON(linked_plane_state->slave == plane_state->slave);
11211 }
11212
11213 return 0;
11214 }
11215
11216 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11217 {
11218 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11219 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11220 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11221 struct intel_plane *plane, *linked;
11222 struct intel_plane_state *plane_state;
11223 int i;
11224
11225 if (INTEL_GEN(dev_priv) < 11)
11226 return 0;
11227
11228 /*
11229 * Destroy all old plane links and make the slave plane invisible
11230 * in the crtc_state->active_planes mask.
11231 */
11232 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11233 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11234 continue;
11235
11236 plane_state->linked_plane = NULL;
11237 if (plane_state->slave && !plane_state->base.visible) {
11238 crtc_state->active_planes &= ~BIT(plane->id);
11239 crtc_state->update_planes |= BIT(plane->id);
11240 }
11241
11242 plane_state->slave = false;
11243 }
11244
11245 if (!crtc_state->nv12_planes)
11246 return 0;
11247
11248 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11249 struct intel_plane_state *linked_state = NULL;
11250
11251 if (plane->pipe != crtc->pipe ||
11252 !(crtc_state->nv12_planes & BIT(plane->id)))
11253 continue;
11254
11255 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11256 if (!icl_is_nv12_y_plane(linked->id))
11257 continue;
11258
11259 if (crtc_state->active_planes & BIT(linked->id))
11260 continue;
11261
11262 linked_state = intel_atomic_get_plane_state(state, linked);
11263 if (IS_ERR(linked_state))
11264 return PTR_ERR(linked_state);
11265
11266 break;
11267 }
11268
11269 if (!linked_state) {
11270 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11271 hweight8(crtc_state->nv12_planes));
11272
11273 return -EINVAL;
11274 }
11275
11276 plane_state->linked_plane = linked;
11277
11278 linked_state->slave = true;
11279 linked_state->linked_plane = plane;
11280 crtc_state->active_planes |= BIT(linked->id);
11281 crtc_state->update_planes |= BIT(linked->id);
11282 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11283 }
11284
11285 return 0;
11286 }
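
/*
 * Net effect on gen11+: every plane scanning out a planar YUV
 * framebuffer gets paired with an otherwise-unused Y plane, which is
 * marked as the slave and added to active_planes/update_planes even
 * though userspace never enabled it. If no free Y-capable plane is
 * left for some UV plane, the atomic check fails with -EINVAL.
 */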
11287
11288 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11289 struct drm_crtc_state *crtc_state)
11290 {
11291 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11292 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11293 struct intel_crtc_state *pipe_config =
11294 to_intel_crtc_state(crtc_state);
11295 int ret;
11296 bool mode_changed = needs_modeset(crtc_state);
11297
11298 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11299 mode_changed && !crtc_state->active)
11300 pipe_config->update_wm_post = true;
11301
11302 if (mode_changed && crtc_state->enable &&
11303 dev_priv->display.crtc_compute_clock &&
11304 !WARN_ON(pipe_config->shared_dpll)) {
11305 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11306 pipe_config);
11307 if (ret)
11308 return ret;
11309 }
11310
11311 if (mode_changed || pipe_config->update_pipe ||
11312 crtc_state->color_mgmt_changed) {
11313 ret = intel_color_check(pipe_config);
11314 if (ret)
11315 return ret;
11316 }
11317
11318 ret = 0;
11319 if (dev_priv->display.compute_pipe_wm) {
11320 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11321 if (ret) {
11322 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11323 return ret;
11324 }
11325 }
11326
11327 if (dev_priv->display.compute_intermediate_wm) {
11328 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11329 return 0;
11330
11331 /*
11332 * Calculate 'intermediate' watermarks that satisfy both the
11333 * old state and the new state. We can program these
11334 * immediately.
11335 */
11336 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11337 if (ret) {
11338 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11339 return ret;
11340 }
11341 }
11342
11343 if (INTEL_GEN(dev_priv) >= 9) {
11344 if (mode_changed || pipe_config->update_pipe)
11345 ret = skl_update_scaler_crtc(pipe_config);
11346
11347 if (!ret)
11348 ret = icl_check_nv12_planes(pipe_config);
11349 if (!ret)
11350 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11351 pipe_config);
11352 if (!ret)
11353 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11354 pipe_config);
11355 }
11356
11357 if (HAS_IPS(dev_priv))
11358 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11359
11360 return ret;
11361 }
11362
11363 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11364 .atomic_check = intel_crtc_atomic_check,
11365 };
11366
11367 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11368 {
11369 struct intel_connector *connector;
11370 struct drm_connector_list_iter conn_iter;
11371
11372 drm_connector_list_iter_begin(dev, &conn_iter);
11373 for_each_intel_connector_iter(connector, &conn_iter) {
11374 if (connector->base.state->crtc)
11375 drm_connector_put(&connector->base);
11376
11377 if (connector->base.encoder) {
11378 connector->base.state->best_encoder =
11379 connector->base.encoder;
11380 connector->base.state->crtc =
11381 connector->base.encoder->crtc;
11382
11383 drm_connector_get(&connector->base);
11384 } else {
11385 connector->base.state->best_encoder = NULL;
11386 connector->base.state->crtc = NULL;
11387 }
11388 }
11389 drm_connector_list_iter_end(&conn_iter);
11390 }
11391
11392 static int
11393 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11394 struct intel_crtc_state *pipe_config)
11395 {
11396 struct drm_connector *connector = conn_state->connector;
11397 const struct drm_display_info *info = &connector->display_info;
11398 int bpp;
11399
11400 switch (conn_state->max_bpc) {
11401 case 6 ... 7:
11402 bpp = 6 * 3;
11403 break;
11404 case 8 ... 9:
11405 bpp = 8 * 3;
11406 break;
11407 case 10 ... 11:
11408 bpp = 10 * 3;
11409 break;
11410 case 12:
11411 bpp = 12 * 3;
11412 break;
11413 default:
11414 return -EINVAL;
11415 }
11416
11417 if (bpp < pipe_config->pipe_bpp) {
11418 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11419 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11420 connector->base.id, connector->name,
11421 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11422 pipe_config->pipe_bpp);
11423
11424 pipe_config->pipe_bpp = bpp;
11425 }
11426
11427 return 0;
11428 }
11429
11430 static int
11431 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11432 struct intel_crtc_state *pipe_config)
11433 {
11434 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11435 struct drm_atomic_state *state = pipe_config->base.state;
11436 struct drm_connector *connector;
11437 struct drm_connector_state *connector_state;
11438 int bpp, i;
11439
11440 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11441 IS_CHERRYVIEW(dev_priv)))
11442 bpp = 10*3;
11443 else if (INTEL_GEN(dev_priv) >= 5)
11444 bpp = 12*3;
11445 else
11446 bpp = 8*3;
11447
11448 pipe_config->pipe_bpp = bpp;
11449
11450 /* Clamp display bpp to connector max bpp */
11451 for_each_new_connector_in_state(state, connector, connector_state, i) {
11452 int ret;
11453
11454 if (connector_state->crtc != &crtc->base)
11455 continue;
11456
11457 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11458 if (ret)
11459 return ret;
11460 }
11461
11462 return 0;
11463 }
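
/*
 * Example: on a gen5+ platform the baseline starts at 12 * 3 = 36
 * bpp; a connector whose max_bpc resolves to 8 then clamps pipe_bpp
 * down to 8 * 3 = 24 in compute_sink_pipe_bpp() above.
 */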
11464
11465 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11466 {
11467 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11468 "type: 0x%x flags: 0x%x\n",
11469 mode->crtc_clock,
11470 mode->crtc_hdisplay, mode->crtc_hsync_start,
11471 mode->crtc_hsync_end, mode->crtc_htotal,
11472 mode->crtc_vdisplay, mode->crtc_vsync_start,
11473 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11474 }
11475
11476 static inline void
11477 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
11478 unsigned int lane_count, struct intel_link_m_n *m_n)
11479 {
11480 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11481 id, lane_count,
11482 m_n->gmch_m, m_n->gmch_n,
11483 m_n->link_m, m_n->link_n, m_n->tu);
11484 }
11485
11486 static void
11487 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11488 const union hdmi_infoframe *frame)
11489 {
11490 if ((drm_debug & DRM_UT_KMS) == 0)
11491 return;
11492
11493 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11494 }
11495
11496 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11497
11498 static const char * const output_type_str[] = {
11499 OUTPUT_TYPE(UNUSED),
11500 OUTPUT_TYPE(ANALOG),
11501 OUTPUT_TYPE(DVO),
11502 OUTPUT_TYPE(SDVO),
11503 OUTPUT_TYPE(LVDS),
11504 OUTPUT_TYPE(TVOUT),
11505 OUTPUT_TYPE(HDMI),
11506 OUTPUT_TYPE(DP),
11507 OUTPUT_TYPE(EDP),
11508 OUTPUT_TYPE(DSI),
11509 OUTPUT_TYPE(DDI),
11510 OUTPUT_TYPE(DP_MST),
11511 };
11512
11513 #undef OUTPUT_TYPE
11514
11515 static void snprintf_output_types(char *buf, size_t len,
11516 unsigned int output_types)
11517 {
11518 char *str = buf;
11519 int i;
11520
11521 str[0] = '\0';
11522
11523 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11524 int r;
11525
11526 if ((output_types & BIT(i)) == 0)
11527 continue;
11528
11529 r = snprintf(str, len, "%s%s",
11530 str != buf ? "," : "", output_type_str[i]);
11531 if (r >= len)
11532 break;
11533 str += r;
11534 len -= r;
11535
11536 output_types &= ~BIT(i);
11537 }
11538
11539 WARN_ON_ONCE(output_types != 0);
11540 }
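
/*
 * Example: output_types = BIT(INTEL_OUTPUT_HDMI) | BIT(INTEL_OUTPUT_DP)
 * formats as "HDMI,DP" (enum order, comma separated). Any set bit
 * without an entry in output_type_str[] survives the loop and trips
 * the WARN_ON_ONCE at the end.
 */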
11541
11542 static const char * const output_format_str[] = {
11543 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11544 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11545 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11546 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11547 };
11548
11549 static const char *output_formats(enum intel_output_format format)
11550 {
11551 if (format >= ARRAY_SIZE(output_format_str))
11552 format = INTEL_OUTPUT_FORMAT_INVALID;
11553 return output_format_str[format];
11554 }
11555
11556 static void intel_dump_pipe_config(struct intel_crtc *crtc,
11557 struct intel_crtc_state *pipe_config,
11558 const char *context)
11559 {
11560 struct drm_device *dev = crtc->base.dev;
11561 struct drm_i915_private *dev_priv = to_i915(dev);
11562 struct drm_plane *plane;
11563 struct intel_plane *intel_plane;
11564 struct intel_plane_state *state;
11565 struct drm_framebuffer *fb;
11566 char buf[64];
11567
11568 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
11569 crtc->base.base.id, crtc->base.name, context);
11570
11571 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11572 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
11573 buf, pipe_config->output_types);
11574
11575 DRM_DEBUG_KMS("output format: %s\n",
11576 output_formats(pipe_config->output_format));
11577
11578 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11579 transcoder_name(pipe_config->cpu_transcoder),
11580 pipe_config->pipe_bpp, pipe_config->dither);
11581
11582 if (pipe_config->has_pch_encoder)
11583 intel_dump_m_n_config(pipe_config, "fdi",
11584 pipe_config->fdi_lanes,
11585 &pipe_config->fdi_m_n);
11586
11587 if (intel_crtc_has_dp_encoder(pipe_config)) {
11588 intel_dump_m_n_config(pipe_config, "dp m_n",
11589 pipe_config->lane_count, &pipe_config->dp_m_n);
11590 if (pipe_config->has_drrs)
11591 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11592 pipe_config->lane_count,
11593 &pipe_config->dp_m2_n2);
11594 }
11595
11596 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
11597 pipe_config->has_audio, pipe_config->has_infoframe);
11598
11599 DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
11600 pipe_config->infoframes.enable);
11601
11602 if (pipe_config->infoframes.enable &
11603 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11604 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11605 if (pipe_config->infoframes.enable &
11606 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11607 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11608 if (pipe_config->infoframes.enable &
11609 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11610 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11611 if (pipe_config->infoframes.enable &
11612 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11613 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11614
11615 DRM_DEBUG_KMS("requested mode:\n");
11616 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11617 DRM_DEBUG_KMS("adjusted mode:\n");
11618 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11619 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11620 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11621 pipe_config->port_clock,
11622 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11623 pipe_config->pixel_rate);
11624
11625 if (INTEL_GEN(dev_priv) >= 9)
11626 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11627 crtc->num_scalers,
11628 pipe_config->scaler_state.scaler_users,
11629 pipe_config->scaler_state.scaler_id);
11630
11631 if (HAS_GMCH(dev_priv))
11632 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11633 pipe_config->gmch_pfit.control,
11634 pipe_config->gmch_pfit.pgm_ratios,
11635 pipe_config->gmch_pfit.lvds_border_bits);
11636 else
11637 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
11638 pipe_config->pch_pfit.pos,
11639 pipe_config->pch_pfit.size,
11640 enableddisabled(pipe_config->pch_pfit.enabled));
11641
11642 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11643 pipe_config->ips_enabled, pipe_config->double_wide);
11644
11645 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11646
11647 DRM_DEBUG_KMS("planes on this crtc\n");
11648 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
11649 struct drm_format_name_buf format_name;
11650 intel_plane = to_intel_plane(plane);
11651 if (intel_plane->pipe != crtc->pipe)
11652 continue;
11653
11654 state = to_intel_plane_state(plane->state);
11655 fb = state->base.fb;
11656 if (!fb) {
11657 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
11658 plane->base.id, plane->name, state->scaler_id);
11659 continue;
11660 }
11661
11662 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
11663 plane->base.id, plane->name,
11664 fb->base.id, fb->width, fb->height,
11665 drm_get_format_name(fb->format->format, &format_name));
11666 if (INTEL_GEN(dev_priv) >= 9)
11667 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
11668 state->scaler_id,
11669 state->base.src.x1 >> 16,
11670 state->base.src.y1 >> 16,
11671 drm_rect_width(&state->base.src) >> 16,
11672 drm_rect_height(&state->base.src) >> 16,
11673 state->base.dst.x1, state->base.dst.y1,
11674 drm_rect_width(&state->base.dst),
11675 drm_rect_height(&state->base.dst));
11676 }
11677 }
11678
11679 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11680 {
11681 struct drm_device *dev = state->dev;
11682 struct drm_connector *connector;
11683 struct drm_connector_list_iter conn_iter;
11684 unsigned int used_ports = 0;
11685 unsigned int used_mst_ports = 0;
11686 bool ret = true;
11687
11688 /*
11689 * Walk the connector list instead of the encoder
11690 * list to detect the problem on ddi platforms
11691 * where there's just one encoder per digital port.
11692 */
11693 drm_connector_list_iter_begin(dev, &conn_iter);
11694 drm_for_each_connector_iter(connector, &conn_iter) {
11695 struct drm_connector_state *connector_state;
11696 struct intel_encoder *encoder;
11697
11698 connector_state = drm_atomic_get_new_connector_state(state, connector);
11699 if (!connector_state)
11700 connector_state = connector->state;
11701
11702 if (!connector_state->best_encoder)
11703 continue;
11704
11705 encoder = to_intel_encoder(connector_state->best_encoder);
11706
11707 WARN_ON(!connector_state->crtc);
11708
11709 switch (encoder->type) {
11710 unsigned int port_mask;
11711 case INTEL_OUTPUT_DDI:
11712 if (WARN_ON(!HAS_DDI(to_i915(dev))))
11713 break;
11714 /* else: fall through */
11715 case INTEL_OUTPUT_DP:
11716 case INTEL_OUTPUT_HDMI:
11717 case INTEL_OUTPUT_EDP:
11718 port_mask = 1 << encoder->port;
11719
11720 /* the same port mustn't appear more than once */
11721 if (used_ports & port_mask)
11722 ret = false;
11723
11724 used_ports |= port_mask;
11725 break;
11726 case INTEL_OUTPUT_DP_MST:
11727 used_mst_ports |=
11728 1 << encoder->port;
11729 break;
11730 default:
11731 break;
11732 }
11733 }
11734 drm_connector_list_iter_end(&conn_iter);
11735
11736 /* can't mix MST and SST/HDMI on the same port */
11737 if (used_ports & used_mst_ports)
11738 return false;
11739
11740 return ret;
11741 }
11742
11743 static int
11744 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11745 {
11746 struct drm_i915_private *dev_priv =
11747 to_i915(crtc_state->base.crtc->dev);
11748 struct intel_crtc_state *saved_state;
11749
11750 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
11751 if (!saved_state)
11752 return -ENOMEM;
11753
11754 /* FIXME: before the switch to atomic started, a new pipe_config was
11755 * kzalloc'd. Code that depends on any field being zero should be
11756 * fixed, so that the crtc_state can be safely duplicated. For now,
11757 	 * only fields that are known not to cause problems are preserved. */
11758
11759 saved_state->scaler_state = crtc_state->scaler_state;
11760 saved_state->shared_dpll = crtc_state->shared_dpll;
11761 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
11762 saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
11763 saved_state->crc_enabled = crtc_state->crc_enabled;
11764 if (IS_G4X(dev_priv) ||
11765 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11766 saved_state->wm = crtc_state->wm;
11767
11768 /* Keep base drm_crtc_state intact, only clear our extended struct */
11769 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
11770 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
11771 sizeof(*crtc_state) - sizeof(crtc_state->base));
11772
11773 kfree(saved_state);
11774 return 0;
11775 }
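
/*
 * The memcpy above relies on 'base' being the first member (enforced
 * by the BUILD_BUG_ON): '&crtc_state->base + 1' points just past the
 * embedded drm_crtc_state, so only the intel-specific tail of the
 * structure is overwritten with the mostly-zeroed saved_state copy
 * while the drm core's own state stays untouched.
 */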
11776
11777 static int
11778 intel_modeset_pipe_config(struct drm_crtc *crtc,
11779 struct intel_crtc_state *pipe_config)
11780 {
11781 struct drm_atomic_state *state = pipe_config->base.state;
11782 struct intel_encoder *encoder;
11783 struct drm_connector *connector;
11784 struct drm_connector_state *connector_state;
11785 int base_bpp, ret;
11786 int i;
11787 bool retry = true;
11788
11789 ret = clear_intel_crtc_state(pipe_config);
11790 if (ret)
11791 return ret;
11792
11793 pipe_config->cpu_transcoder =
11794 (enum transcoder) to_intel_crtc(crtc)->pipe;
11795
11796 /*
11797 * Sanitize sync polarity flags based on requested ones. If neither
11798 * positive or negative polarity is requested, treat this as meaning
11799 * negative polarity.
11800 */
11801 if (!(pipe_config->base.adjusted_mode.flags &
11802 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
11803 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
11804
11805 if (!(pipe_config->base.adjusted_mode.flags &
11806 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
11807 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
11808
11809 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
11810 pipe_config);
11811 if (ret)
11812 return ret;
11813
11814 base_bpp = pipe_config->pipe_bpp;
11815
11816 /*
11817 * Determine the real pipe dimensions. Note that stereo modes can
11818 * increase the actual pipe size due to the frame doubling and
11819 	 * insertion of additional space for blanks between the frames. This
11820 * is stored in the crtc timings. We use the requested mode to do this
11821 * computation to clearly distinguish it from the adjusted mode, which
11822 * can be changed by the connectors in the below retry loop.
11823 */
11824 drm_mode_get_hv_timing(&pipe_config->base.mode,
11825 &pipe_config->pipe_src_w,
11826 &pipe_config->pipe_src_h);
11827
11828 for_each_new_connector_in_state(state, connector, connector_state, i) {
11829 if (connector_state->crtc != crtc)
11830 continue;
11831
11832 encoder = to_intel_encoder(connector_state->best_encoder);
11833
11834 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
11835 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11836 return -EINVAL;
11837 }
11838
11839 /*
11840 * Determine output_types before calling the .compute_config()
11841 * hooks so that the hooks can use this information safely.
11842 */
11843 if (encoder->compute_output_type)
11844 pipe_config->output_types |=
11845 BIT(encoder->compute_output_type(encoder, pipe_config,
11846 connector_state));
11847 else
11848 pipe_config->output_types |= BIT(encoder->type);
11849 }
11850
11851 encoder_retry:
11852 /* Ensure the port clock defaults are reset when retrying. */
11853 pipe_config->port_clock = 0;
11854 pipe_config->pixel_multiplier = 1;
11855
11856 /* Fill in default crtc timings, allow encoders to overwrite them. */
11857 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
11858 CRTC_STEREO_DOUBLE);
11859
11860 /* Pass our mode to the connectors and the CRTC to give them a chance to
11861 * adjust it according to limitations or connector properties, and also
11862 * a chance to reject the mode entirely.
11863 */
11864 for_each_new_connector_in_state(state, connector, connector_state, i) {
11865 if (connector_state->crtc != crtc)
11866 continue;
11867
11868 encoder = to_intel_encoder(connector_state->best_encoder);
11869 ret = encoder->compute_config(encoder, pipe_config,
11870 connector_state);
11871 if (ret < 0) {
11872 if (ret != -EDEADLK)
11873 DRM_DEBUG_KMS("Encoder config failure: %d\n",
11874 ret);
11875 return ret;
11876 }
11877 }
11878
11879 /* Set default port clock if not overwritten by the encoder. Needs to be
11880 * done afterwards in case the encoder adjusts the mode. */
11881 if (!pipe_config->port_clock)
11882 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
11883 * pipe_config->pixel_multiplier;
11884
11885 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
11886 if (ret == -EDEADLK)
11887 return ret;
11888 if (ret < 0) {
11889 DRM_DEBUG_KMS("CRTC fixup failed\n");
11890 return ret;
11891 }
11892
11893 if (ret == RETRY) {
11894 if (WARN(!retry, "loop in pipe configuration computation\n"))
11895 return -EINVAL;
11896
11897 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11898 retry = false;
11899 goto encoder_retry;
11900 }
11901
11902 	/* Dithering seems not to pass bits through correctly when it should, so
11903 	 * only enable it on 6bpc panels and when it's not a compliance
11904 	 * test requesting a 6bpc video pattern.
11905 */
11906 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
11907 !pipe_config->dither_force_disable;
11908 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
11909 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
11910
11911 return 0;
11912 }
11913
11914 static bool intel_fuzzy_clock_check(int clock1, int clock2)
11915 {
11916 int diff;
11917
11918 if (clock1 == clock2)
11919 return true;
11920
11921 if (!clock1 || !clock2)
11922 return false;
11923
11924 diff = abs(clock1 - clock2);
11925
11926 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
11927 return true;
11928
11929 return false;
11930 }
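
/*
 * The inequality is equivalent to diff < 0.05 * (clock1 + clock2),
 * i.e. the clocks may differ by roughly 10% of either one. Example:
 * 100000 vs 104000 gives (4000 + 204000) * 100 / 204000 = 101 < 105,
 * a match; 100000 vs 120000 gives 109, a mismatch.
 */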
11931
11932 static bool
11933 intel_compare_m_n(unsigned int m, unsigned int n,
11934 unsigned int m2, unsigned int n2,
11935 bool exact)
11936 {
11937 if (m == m2 && n == n2)
11938 return true;
11939
11940 if (exact || !m || !n || !m2 || !n2)
11941 return false;
11942
11943 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11944
11945 if (n > n2) {
11946 while (n > n2) {
11947 m2 <<= 1;
11948 n2 <<= 1;
11949 }
11950 } else if (n < n2) {
11951 while (n < n2) {
11952 m <<= 1;
11953 n <<= 1;
11954 }
11955 }
11956
11957 if (n != n2)
11958 return false;
11959
11960 return intel_fuzzy_clock_check(m, m2);
11961 }
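
/*
 * The inexact path rescales the ratio with the smaller n by doubling
 * until the n values meet, then applies the fuzzy clock check to the
 * m values. Example: 11/20 vs 23/40 doubles 11/20 to 22/40, and
 * intel_fuzzy_clock_check(22, 23) then reports a match since the two
 * are well within tolerance.
 */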
11962
11963 static bool
11964 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11965 struct intel_link_m_n *m2_n2,
11966 bool adjust)
11967 {
11968 if (m_n->tu == m2_n2->tu &&
11969 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11970 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11971 intel_compare_m_n(m_n->link_m, m_n->link_n,
11972 m2_n2->link_m, m2_n2->link_n, !adjust)) {
11973 if (adjust)
11974 *m2_n2 = *m_n;
11975
11976 return true;
11977 }
11978
11979 return false;
11980 }
11981
11982 static bool
11983 intel_compare_infoframe(const union hdmi_infoframe *a,
11984 const union hdmi_infoframe *b)
11985 {
11986 return memcmp(a, b, sizeof(*a)) == 0;
11987 }
11988
11989 static void
11990 pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
11991 bool adjust, const char *name,
11992 const union hdmi_infoframe *a,
11993 const union hdmi_infoframe *b)
11994 {
11995 if (adjust) {
11996 if ((drm_debug & DRM_UT_KMS) == 0)
11997 return;
11998
11999 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
12000 drm_dbg(DRM_UT_KMS, "expected:");
12001 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12002 		drm_dbg(DRM_UT_KMS, "found:");
12003 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12004 } else {
12005 drm_err("mismatch in %s infoframe", name);
12006 drm_err("expected:");
12007 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12008 		drm_err("found:");
12009 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12010 }
12011 }
12012
12013 static void __printf(3, 4)
12014 pipe_config_err(bool adjust, const char *name, const char *format, ...)
12015 {
12016 struct va_format vaf;
12017 va_list args;
12018
12019 va_start(args, format);
12020 vaf.fmt = format;
12021 vaf.va = &args;
12022
12023 if (adjust)
12024 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12025 else
12026 drm_err("mismatch in %s %pV", name, &vaf);
12027
12028 va_end(args);
12029 }
12030
12031 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12032 {
12033 if (i915_modparams.fastboot != -1)
12034 return i915_modparams.fastboot;
12035
12036 /* Enable fastboot by default on Skylake and newer */
12037 if (INTEL_GEN(dev_priv) >= 9)
12038 return true;
12039
12040 /* Enable fastboot by default on VLV and CHV */
12041 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12042 return true;
12043
12044 /* Disabled by default on all others */
12045 return false;
12046 }
12047
12048 static bool
12049 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
12050 struct intel_crtc_state *current_config,
12051 struct intel_crtc_state *pipe_config,
12052 bool adjust)
12053 {
12054 bool ret = true;
12055 bool fixup_inherited = adjust &&
12056 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12057 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12058
12059 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12060 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12061 ret = false;
12062 }
12063
12064 #define PIPE_CONF_CHECK_X(name) do { \
12065 if (current_config->name != pipe_config->name) { \
12066 pipe_config_err(adjust, __stringify(name), \
12067 "(expected 0x%08x, found 0x%08x)\n", \
12068 current_config->name, \
12069 pipe_config->name); \
12070 ret = false; \
12071 } \
12072 } while (0)
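
/*
 * All of the PIPE_CONF_CHECK_* helpers below follow this pattern:
 * compare one field of the software-computed state against the state
 * read back from the hardware, log any mismatch (debug level when
 * 'adjust' is set, error level otherwise) and latch ret = false
 * without bailing out, so a single pass reports every bad field.
 */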
12073
12074 #define PIPE_CONF_CHECK_I(name) do { \
12075 if (current_config->name != pipe_config->name) { \
12076 pipe_config_err(adjust, __stringify(name), \
12077 "(expected %i, found %i)\n", \
12078 current_config->name, \
12079 pipe_config->name); \
12080 ret = false; \
12081 } \
12082 } while (0)
12083
12084 #define PIPE_CONF_CHECK_BOOL(name) do { \
12085 if (current_config->name != pipe_config->name) { \
12086 pipe_config_err(adjust, __stringify(name), \
12087 "(expected %s, found %s)\n", \
12088 yesno(current_config->name), \
12089 yesno(pipe_config->name)); \
12090 ret = false; \
12091 } \
12092 } while (0)
12093
12094 /*
12095 * Checks state where we only read out the enabling, but not the entire
12096 * state itself (like full infoframes or ELD for audio). These states
12097 * require a full modeset on bootup to fix up.
12098 */
12099 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12100 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12101 PIPE_CONF_CHECK_BOOL(name); \
12102 } else { \
12103 pipe_config_err(adjust, __stringify(name), \
12104 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12105 yesno(current_config->name), \
12106 yesno(pipe_config->name)); \
12107 ret = false; \
12108 } \
12109 } while (0)
12110
12111 #define PIPE_CONF_CHECK_P(name) do { \
12112 if (current_config->name != pipe_config->name) { \
12113 pipe_config_err(adjust, __stringify(name), \
12114 "(expected %p, found %p)\n", \
12115 current_config->name, \
12116 pipe_config->name); \
12117 ret = false; \
12118 } \
12119 } while (0)
12120
12121 #define PIPE_CONF_CHECK_M_N(name) do { \
12122 if (!intel_compare_link_m_n(&current_config->name, \
12123 &pipe_config->name,\
12124 adjust)) { \
12125 pipe_config_err(adjust, __stringify(name), \
12126 "(expected tu %i gmch %i/%i link %i/%i, " \
12127 "found tu %i, gmch %i/%i link %i/%i)\n", \
12128 current_config->name.tu, \
12129 current_config->name.gmch_m, \
12130 current_config->name.gmch_n, \
12131 current_config->name.link_m, \
12132 current_config->name.link_n, \
12133 pipe_config->name.tu, \
12134 pipe_config->name.gmch_m, \
12135 pipe_config->name.gmch_n, \
12136 pipe_config->name.link_m, \
12137 pipe_config->name.link_n); \
12138 ret = false; \
12139 } \
12140 } while (0)
12141
12142 /* This is required for BDW+ where there is only one set of registers for
12143 * switching between high and low RR.
12144 * This macro can be used whenever a comparison has to be made between one
12145 * hw state and multiple sw state variables.
12146 */
12147 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12148 if (!intel_compare_link_m_n(&current_config->name, \
12149 &pipe_config->name, adjust) && \
12150 !intel_compare_link_m_n(&current_config->alt_name, \
12151 &pipe_config->name, adjust)) { \
12152 pipe_config_err(adjust, __stringify(name), \
12153 "(expected tu %i gmch %i/%i link %i/%i, " \
12154 "or tu %i gmch %i/%i link %i/%i, " \
12155 "found tu %i, gmch %i/%i link %i/%i)\n", \
12156 current_config->name.tu, \
12157 current_config->name.gmch_m, \
12158 current_config->name.gmch_n, \
12159 current_config->name.link_m, \
12160 current_config->name.link_n, \
12161 current_config->alt_name.tu, \
12162 current_config->alt_name.gmch_m, \
12163 current_config->alt_name.gmch_n, \
12164 current_config->alt_name.link_m, \
12165 current_config->alt_name.link_n, \
12166 pipe_config->name.tu, \
12167 pipe_config->name.gmch_m, \
12168 pipe_config->name.gmch_n, \
12169 pipe_config->name.link_m, \
12170 pipe_config->name.link_n); \
12171 ret = false; \
12172 } \
12173 } while (0)
12174
12175 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12176 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12177 pipe_config_err(adjust, __stringify(name), \
12178 "(%x) (expected %i, found %i)\n", \
12179 (mask), \
12180 current_config->name & (mask), \
12181 pipe_config->name & (mask)); \
12182 ret = false; \
12183 } \
12184 } while (0)
12185
12186 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12187 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12188 pipe_config_err(adjust, __stringify(name), \
12189 "(expected %i, found %i)\n", \
12190 current_config->name, \
12191 pipe_config->name); \
12192 ret = false; \
12193 } \
12194 } while (0)
12195
12196 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12197 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12198 &pipe_config->infoframes.name)) { \
12199 pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
12200 &current_config->infoframes.name, \
12201 &pipe_config->infoframes.name); \
12202 ret = false; \
12203 } \
12204 } while (0)
12205
12206 #define PIPE_CONF_QUIRK(quirk) \
12207 ((current_config->quirks | pipe_config->quirks) & (quirk))
12208
12209 PIPE_CONF_CHECK_I(cpu_transcoder);
12210
12211 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12212 PIPE_CONF_CHECK_I(fdi_lanes);
12213 PIPE_CONF_CHECK_M_N(fdi_m_n);
12214
12215 PIPE_CONF_CHECK_I(lane_count);
12216 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12217
12218 if (INTEL_GEN(dev_priv) < 8) {
12219 PIPE_CONF_CHECK_M_N(dp_m_n);
12220
12221 if (current_config->has_drrs)
12222 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12223 } else
12224 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12225
12226 PIPE_CONF_CHECK_X(output_types);
12227
12228 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12229 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12230 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12231 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12232 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12233 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12234
12235 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12236 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12237 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12238 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12239 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12240 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12241
12242 PIPE_CONF_CHECK_I(pixel_multiplier);
12243 PIPE_CONF_CHECK_I(output_format);
12244 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12245 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12246 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12247 PIPE_CONF_CHECK_BOOL(limited_color_range);
12248
12249 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12250 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12251 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12252
12253 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12254
12255 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12256 DRM_MODE_FLAG_INTERLACE);
12257
12258 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12259 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12260 DRM_MODE_FLAG_PHSYNC);
12261 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12262 DRM_MODE_FLAG_NHSYNC);
12263 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12264 DRM_MODE_FLAG_PVSYNC);
12265 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12266 DRM_MODE_FLAG_NVSYNC);
12267 }
12268
12269 PIPE_CONF_CHECK_X(gmch_pfit.control);
12270 /* pfit ratios are autocomputed by the hw on gen4+ */
12271 if (INTEL_GEN(dev_priv) < 4)
12272 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12273 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12274
12275 if (!adjust) {
12276 PIPE_CONF_CHECK_I(pipe_src_w);
12277 PIPE_CONF_CHECK_I(pipe_src_h);
12278
12279 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12280 if (current_config->pch_pfit.enabled) {
12281 PIPE_CONF_CHECK_X(pch_pfit.pos);
12282 PIPE_CONF_CHECK_X(pch_pfit.size);
12283 }
12284
12285 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12286 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12287
12288 PIPE_CONF_CHECK_X(gamma_mode);
12289 if (IS_CHERRYVIEW(dev_priv))
12290 PIPE_CONF_CHECK_X(cgm_mode);
12291 else
12292 PIPE_CONF_CHECK_X(csc_mode);
12293 PIPE_CONF_CHECK_BOOL(gamma_enable);
12294 PIPE_CONF_CHECK_BOOL(csc_enable);
12295 }
12296
12297 PIPE_CONF_CHECK_BOOL(double_wide);
12298
12299 PIPE_CONF_CHECK_P(shared_dpll);
12300 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12301 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12302 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12303 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12304 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12305 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12306 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12307 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12308 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12309 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12310 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12311 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12312 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12313 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12314 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12315 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12316 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12317 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12318 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12319 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12320 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12321 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12322 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12323 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12324 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12325 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12326 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12327 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12328 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12329 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12330 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12331
12332 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12333 PIPE_CONF_CHECK_X(dsi_pll.div);
12334
12335 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12336 PIPE_CONF_CHECK_I(pipe_bpp);
12337
12338 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12339 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12340
12341 PIPE_CONF_CHECK_I(min_voltage_level);
12342
12343 PIPE_CONF_CHECK_X(infoframes.enable);
12344 PIPE_CONF_CHECK_X(infoframes.gcp);
12345 PIPE_CONF_CHECK_INFOFRAME(avi);
12346 PIPE_CONF_CHECK_INFOFRAME(spd);
12347 PIPE_CONF_CHECK_INFOFRAME(hdmi);
12348
12349 #undef PIPE_CONF_CHECK_X
12350 #undef PIPE_CONF_CHECK_I
12351 #undef PIPE_CONF_CHECK_BOOL
12352 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12353 #undef PIPE_CONF_CHECK_P
12354 #undef PIPE_CONF_CHECK_FLAGS
12355 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12356 #undef PIPE_CONF_QUIRK
12357
12358 return ret;
12359 }
12360
12361 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12362 const struct intel_crtc_state *pipe_config)
12363 {
12364 if (pipe_config->has_pch_encoder) {
12365 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12366 &pipe_config->fdi_m_n);
12367 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12368
12369 /*
12370 * FDI already provided one idea for the dotclock.
12371 * Yell if the encoder disagrees.
12372 */
12373 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12374 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12375 fdi_dotclock, dotclock);
12376 }
12377 }
12378
12379 static void verify_wm_state(struct drm_crtc *crtc,
12380 struct drm_crtc_state *new_state)
12381 {
12382 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12383 struct skl_hw_state {
12384 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12385 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12386 struct skl_ddb_allocation ddb;
12387 struct skl_pipe_wm wm;
12388 } *hw;
12389 struct skl_ddb_allocation *sw_ddb;
12390 struct skl_pipe_wm *sw_wm;
12391 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12392 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12393 const enum pipe pipe = intel_crtc->pipe;
12394 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12395
12396 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12397 return;
12398
12399 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12400 if (!hw)
12401 return;
12402
12403 skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
12404 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12405
12406 skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
12407
12408 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12409 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12410
12411 if (INTEL_GEN(dev_priv) >= 11 &&
12412 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12413 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12414 sw_ddb->enabled_slices,
12415 hw->ddb.enabled_slices);
12416
12417 /* planes */
12418 for_each_universal_plane(dev_priv, pipe, plane) {
12419 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12420
12421 hw_plane_wm = &hw->wm.planes[plane];
12422 sw_plane_wm = &sw_wm->planes[plane];
12423
12424 /* Watermarks */
12425 for (level = 0; level <= max_level; level++) {
12426 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12427 &sw_plane_wm->wm[level]))
12428 continue;
12429
12430 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12431 pipe_name(pipe), plane + 1, level,
12432 sw_plane_wm->wm[level].plane_en,
12433 sw_plane_wm->wm[level].plane_res_b,
12434 sw_plane_wm->wm[level].plane_res_l,
12435 hw_plane_wm->wm[level].plane_en,
12436 hw_plane_wm->wm[level].plane_res_b,
12437 hw_plane_wm->wm[level].plane_res_l);
12438 }
12439
12440 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12441 &sw_plane_wm->trans_wm)) {
12442 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12443 pipe_name(pipe), plane + 1,
12444 sw_plane_wm->trans_wm.plane_en,
12445 sw_plane_wm->trans_wm.plane_res_b,
12446 sw_plane_wm->trans_wm.plane_res_l,
12447 hw_plane_wm->trans_wm.plane_en,
12448 hw_plane_wm->trans_wm.plane_res_b,
12449 hw_plane_wm->trans_wm.plane_res_l);
12450 }
12451
12452 /* DDB */
12453 hw_ddb_entry = &hw->ddb_y[plane];
12454 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12455
12456 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12457 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12458 pipe_name(pipe), plane + 1,
12459 sw_ddb_entry->start, sw_ddb_entry->end,
12460 hw_ddb_entry->start, hw_ddb_entry->end);
12461 }
12462 }
12463
12464 /*
12465 * cursor
12466 * If the cursor plane isn't active, we may not have updated its ddb
12467 * allocation. In that case, since the ddb allocation will be updated
12468 * once the plane becomes visible, we can skip this check.
12469 */
12470 if (1) {
12471 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12472
12473 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12474 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12475
12476 /* Watermarks */
12477 for (level = 0; level <= max_level; level++) {
12478 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12479 &sw_plane_wm->wm[level]))
12480 continue;
12481
12482 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12483 pipe_name(pipe), level,
12484 sw_plane_wm->wm[level].plane_en,
12485 sw_plane_wm->wm[level].plane_res_b,
12486 sw_plane_wm->wm[level].plane_res_l,
12487 hw_plane_wm->wm[level].plane_en,
12488 hw_plane_wm->wm[level].plane_res_b,
12489 hw_plane_wm->wm[level].plane_res_l);
12490 }
12491
12492 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12493 &sw_plane_wm->trans_wm)) {
12494 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12495 pipe_name(pipe),
12496 sw_plane_wm->trans_wm.plane_en,
12497 sw_plane_wm->trans_wm.plane_res_b,
12498 sw_plane_wm->trans_wm.plane_res_l,
12499 hw_plane_wm->trans_wm.plane_en,
12500 hw_plane_wm->trans_wm.plane_res_b,
12501 hw_plane_wm->trans_wm.plane_res_l);
12502 }
12503
12504 /* DDB */
12505 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
12506 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12507
12508 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12509 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12510 pipe_name(pipe),
12511 sw_ddb_entry->start, sw_ddb_entry->end,
12512 hw_ddb_entry->start, hw_ddb_entry->end);
12513 }
12514 }
12515
12516 kfree(hw);
12517 }
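
/*
 * Illustrative sketch (not part of the driver): the comparisons above rely
 * on skl_wm_level_equals(), which lives in the watermark code. Judging by
 * the fields dumped in the error messages, it amounts to a field-wise
 * compare along these lines (the helper name below is made up):
 */
static bool __maybe_unused example_wm_level_equals(const struct skl_wm_level *l1,
						   const struct skl_wm_level *l2)
{
	/* enable bit, blocks and lines must all match */
	return l1->plane_en == l2->plane_en &&
	       l1->plane_res_b == l2->plane_res_b &&
	       l1->plane_res_l == l2->plane_res_l;
}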
12518
12519 static void
12520 verify_connector_state(struct drm_device *dev,
12521 struct drm_atomic_state *state,
12522 struct drm_crtc *crtc)
12523 {
12524 struct drm_connector *connector;
12525 struct drm_connector_state *new_conn_state;
12526 int i;
12527
12528 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12529 struct drm_encoder *encoder = connector->encoder;
12530 struct drm_crtc_state *crtc_state = NULL;
12531
12532 if (new_conn_state->crtc != crtc)
12533 continue;
12534
12535 if (crtc)
12536 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12537
12538 intel_connector_verify_state(crtc_state, new_conn_state);
12539
12540 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12541 "connector's atomic encoder doesn't match legacy encoder\n");
12542 }
12543 }
12544
12545 static void
12546 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12547 {
12548 struct intel_encoder *encoder;
12549 struct drm_connector *connector;
12550 struct drm_connector_state *old_conn_state, *new_conn_state;
12551 int i;
12552
12553 for_each_intel_encoder(dev, encoder) {
12554 bool enabled = false, found = false;
12555 enum pipe pipe;
12556
12557 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12558 encoder->base.base.id,
12559 encoder->base.name);
12560
12561 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12562 new_conn_state, i) {
12563 if (old_conn_state->best_encoder == &encoder->base)
12564 found = true;
12565
12566 if (new_conn_state->best_encoder != &encoder->base)
12567 continue;
12568 found = enabled = true;
12569
12570 I915_STATE_WARN(new_conn_state->crtc !=
12571 encoder->base.crtc,
12572 "connector's crtc doesn't match encoder crtc\n");
12573 }
12574
12575 if (!found)
12576 continue;
12577
12578 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12579 "encoder's enabled state mismatch "
12580 "(expected %i, found %i)\n",
12581 !!encoder->base.crtc, enabled);
12582
12583 if (!encoder->base.crtc) {
12584 bool active;
12585
12586 active = encoder->get_hw_state(encoder, &pipe);
12587 I915_STATE_WARN(active,
12588 "encoder detached but still enabled on pipe %c.\n",
12589 pipe_name(pipe));
12590 }
12591 }
12592 }
12593
12594 static void
12595 verify_crtc_state(struct drm_crtc *crtc,
12596 struct drm_crtc_state *old_crtc_state,
12597 struct drm_crtc_state *new_crtc_state)
12598 {
12599 struct drm_device *dev = crtc->dev;
12600 struct drm_i915_private *dev_priv = to_i915(dev);
12601 struct intel_encoder *encoder;
12602 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12603 struct intel_crtc_state *pipe_config, *sw_config;
12604 struct drm_atomic_state *old_state;
12605 bool active;
12606
12607 old_state = old_crtc_state->state;
12608 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12609 pipe_config = to_intel_crtc_state(old_crtc_state);
12610 memset(pipe_config, 0, sizeof(*pipe_config));
12611 pipe_config->base.crtc = crtc;
12612 pipe_config->base.state = old_state;
12613
12614 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12615
12616 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12617
12618 /* we keep both pipes enabled on 830 */
12619 if (IS_I830(dev_priv))
12620 active = new_crtc_state->active;
12621
12622 I915_STATE_WARN(new_crtc_state->active != active,
12623 "crtc active state doesn't match with hw state "
12624 "(expected %i, found %i)\n", new_crtc_state->active, active);
12625
12626 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12627 "transitional active state does not match atomic hw state "
12628 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12629
12630 for_each_encoder_on_crtc(dev, crtc, encoder) {
12631 enum pipe pipe;
12632
12633 active = encoder->get_hw_state(encoder, &pipe);
12634 I915_STATE_WARN(active != new_crtc_state->active,
12635 "[ENCODER:%i] active %i with crtc active %i\n",
12636 encoder->base.base.id, active, new_crtc_state->active);
12637
12638 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12639 "Encoder connected to wrong pipe %c\n",
12640 pipe_name(pipe));
12641
12642 if (active)
12643 encoder->get_config(encoder, pipe_config);
12644 }
12645
12646 intel_crtc_compute_pixel_rate(pipe_config);
12647
12648 if (!new_crtc_state->active)
12649 return;
12650
12651 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12652
12653 sw_config = to_intel_crtc_state(new_crtc_state);
12654 if (!intel_pipe_config_compare(dev_priv, sw_config,
12655 pipe_config, false)) {
12656 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12657 intel_dump_pipe_config(intel_crtc, pipe_config,
12658 "[hw state]");
12659 intel_dump_pipe_config(intel_crtc, sw_config,
12660 "[sw state]");
12661 }
12662 }
12663
12664 static void
12665 intel_verify_planes(struct intel_atomic_state *state)
12666 {
12667 struct intel_plane *plane;
12668 const struct intel_plane_state *plane_state;
12669 int i;
12670
12671 for_each_new_intel_plane_in_state(state, plane,
12672 plane_state, i)
12673 assert_plane(plane, plane_state->slave ||
12674 plane_state->base.visible);
12675 }
12676
12677 static void
12678 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12679 struct intel_shared_dpll *pll,
12680 struct drm_crtc *crtc,
12681 struct drm_crtc_state *new_state)
12682 {
12683 struct intel_dpll_hw_state dpll_hw_state;
12684 unsigned int crtc_mask;
12685 bool active;
12686
12687 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12688
12689 DRM_DEBUG_KMS("%s\n", pll->info->name);
12690
12691 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12692
12693 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12694 I915_STATE_WARN(!pll->on && pll->active_mask,
12695 "pll in active use but not on in sw tracking\n");
12696 I915_STATE_WARN(pll->on && !pll->active_mask,
12697 "pll is on but not used by any active crtc\n");
12698 I915_STATE_WARN(pll->on != active,
12699 "pll on state mismatch (expected %i, found %i)\n",
12700 pll->on, active);
12701 }
12702
12703 if (!crtc) {
12704 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12705 "more active pll users than references: %x vs %x\n",
12706 pll->active_mask, pll->state.crtc_mask);
12707
12708 return;
12709 }
12710
12711 crtc_mask = drm_crtc_mask(crtc);
12712
12713 if (new_state->active)
12714 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12715 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12716 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12717 else
12718 I915_STATE_WARN(pll->active_mask & crtc_mask,
12719 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12720 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12721
12722 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12723 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12724 crtc_mask, pll->state.crtc_mask);
12725
12726 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12727 &dpll_hw_state,
12728 sizeof(dpll_hw_state)),
12729 "pll hw state mismatch\n");
12730 }
12731
12732 static void
12733 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12734 struct drm_crtc_state *old_crtc_state,
12735 struct drm_crtc_state *new_crtc_state)
12736 {
12737 struct drm_i915_private *dev_priv = to_i915(dev);
12738 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12739 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12740
12741 if (new_state->shared_dpll)
12742 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12743
12744 if (old_state->shared_dpll &&
12745 old_state->shared_dpll != new_state->shared_dpll) {
12746 unsigned int crtc_mask = drm_crtc_mask(crtc);
12747 struct intel_shared_dpll *pll = old_state->shared_dpll;
12748
12749 I915_STATE_WARN(pll->active_mask & crtc_mask,
12750 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12751 pipe_name(drm_crtc_index(crtc)));
12752 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12753 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12754 pipe_name(drm_crtc_index(crtc)));
12755 }
12756 }
12757
12758 static void
12759 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12760 struct drm_atomic_state *state,
12761 struct drm_crtc_state *old_state,
12762 struct drm_crtc_state *new_state)
12763 {
12764 if (!needs_modeset(new_state) &&
12765 !to_intel_crtc_state(new_state)->update_pipe)
12766 return;
12767
12768 verify_wm_state(crtc, new_state);
12769 verify_connector_state(crtc->dev, state, crtc);
12770 verify_crtc_state(crtc, old_state, new_state);
12771 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12772 }
12773
12774 static void
12775 verify_disabled_dpll_state(struct drm_device *dev)
12776 {
12777 struct drm_i915_private *dev_priv = to_i915(dev);
12778 int i;
12779
12780 for (i = 0; i < dev_priv->num_shared_dpll; i++)
12781 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12782 }
12783
12784 static void
12785 intel_modeset_verify_disabled(struct drm_device *dev,
12786 struct drm_atomic_state *state)
12787 {
12788 verify_encoder_state(dev, state);
12789 verify_connector_state(dev, state, NULL);
12790 verify_disabled_dpll_state(dev);
12791 }
12792
12793 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12794 {
12795 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12796 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12797
12798 /*
12799 * The scanline counter increments at the leading edge of hsync.
12800 *
12801 * On most platforms it starts counting from vtotal-1 on the
12802 * first active line. That means the scanline counter value is
12803 * always one less than what we would expect. I.e. just after
12804 * start of vblank, which also occurs at start of hsync (on the
12805 * last active line), the scanline counter will read vblank_start-1.
12806 *
12807 * On gen2 the scanline counter starts counting from 1 instead
12808 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12809 * to keep the value positive), instead of adding one.
12810 *
12811 * On HSW+ the behaviour of the scanline counter depends on the output
12812 * type. For DP ports it behaves like most other platforms, but on HDMI
12813 * there's an extra 1 line difference. So we need to add two instead of
12814 * one to the value.
12815 *
12816 * On VLV/CHV DSI the scanline counter would appear to increment
12817 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12818 * that means we can't tell whether we're in vblank or not while
12819 * we're on that particular line. We must still set scanline_offset
12820 * to 1 so that the vblank timestamps come out correct when we query
12821 * the scanline counter from within the vblank interrupt handler.
12822 * However if queried just before the start of vblank we'll get an
12823 * answer that's slightly in the future.
12824 */
12825 if (IS_GEN(dev_priv, 2)) {
12826 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12827 int vtotal;
12828
12829 vtotal = adjusted_mode->crtc_vtotal;
12830 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12831 vtotal /= 2;
12832
12833 crtc->scanline_offset = vtotal - 1;
12834 } else if (HAS_DDI(dev_priv) &&
12835 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12836 crtc->scanline_offset = 2;
12837 } else
12838 crtc->scanline_offset = 1;
12839 }
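
/*
 * Illustrative sketch (not part of the driver): the offset computed above
 * is applied when the raw scanline counter is read back, roughly the way
 * the vblank code in i915_irq.c folds it in. The helper name is made up
 * for the example.
 */
static int __maybe_unused example_apply_scanline_offset(int hw_scanline,
							int scanline_offset,
							int vtotal)
{
	/*
	 * E.g. on gen2 scanline_offset == vtotal - 1, so a raw reading of
	 * 1 on the first active line wraps back to scanline 0.
	 */
	return (hw_scanline + scanline_offset) % vtotal;
}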
12840
12841 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12842 {
12843 struct drm_device *dev = state->dev;
12844 struct drm_i915_private *dev_priv = to_i915(dev);
12845 struct drm_crtc *crtc;
12846 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12847 int i;
12848
12849 if (!dev_priv->display.crtc_compute_clock)
12850 return;
12851
12852 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12853 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12854 struct intel_shared_dpll *old_dpll =
12855 to_intel_crtc_state(old_crtc_state)->shared_dpll;
12856
12857 if (!needs_modeset(new_crtc_state))
12858 continue;
12859
12860 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12861
12862 if (!old_dpll)
12863 continue;
12864
12865 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12866 }
12867 }
12868
12869 /*
12870 * This implements the workaround described in the "notes" section of the mode
12871 * set sequence documentation. When going from no pipes or single pipe to
12872 * multiple pipes, and planes are enabled after the pipe, we need to wait at
12873 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12874 */
12875 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12876 {
12877 struct drm_crtc_state *crtc_state;
12878 struct intel_crtc *intel_crtc;
12879 struct drm_crtc *crtc;
12880 struct intel_crtc_state *first_crtc_state = NULL;
12881 struct intel_crtc_state *other_crtc_state = NULL;
12882 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12883 int i;
12884
12885 /* look at all crtc's that are going to be enabled during the modeset */
12886 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12887 intel_crtc = to_intel_crtc(crtc);
12888
12889 if (!crtc_state->active || !needs_modeset(crtc_state))
12890 continue;
12891
12892 if (first_crtc_state) {
12893 other_crtc_state = to_intel_crtc_state(crtc_state);
12894 break;
12895 } else {
12896 first_crtc_state = to_intel_crtc_state(crtc_state);
12897 first_pipe = intel_crtc->pipe;
12898 }
12899 }
12900
12901 /* No workaround needed? */
12902 if (!first_crtc_state)
12903 return 0;
12904
12905 /* w/a possibly needed, check how many crtc's are already enabled. */
12906 for_each_intel_crtc(state->dev, intel_crtc) {
12907 struct intel_crtc_state *pipe_config;
12908
12909 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12910 if (IS_ERR(pipe_config))
12911 return PTR_ERR(pipe_config);
12912
12913 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12914
12915 if (!pipe_config->base.active ||
12916 needs_modeset(&pipe_config->base))
12917 continue;
12918
12919 /* 2 or more enabled crtcs means no need for w/a */
12920 if (enabled_pipe != INVALID_PIPE)
12921 return 0;
12922
12923 enabled_pipe = intel_crtc->pipe;
12924 }
12925
12926 if (enabled_pipe != INVALID_PIPE)
12927 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12928 else if (other_crtc_state)
12929 other_crtc_state->hsw_workaround_pipe = first_pipe;
12930
12931 return 0;
12932 }
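
/*
 * Worked example for the bookkeeping above, with assumed pipe states: if
 * pipe A is already running and this modeset enables pipe B, then
 * enabled_pipe == PIPE_A and pipe B's state gets
 * hsw_workaround_pipe = PIPE_A, i.e. plane enabling on pipe B waits for
 * vblanks on pipe A. If the modeset instead brings up both pipe A and
 * pipe B from nothing, the second crtc state gets
 * hsw_workaround_pipe = first_pipe (pipe A).
 */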
12933
12934 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12935 {
12936 struct drm_crtc *crtc;
12937
12938 /* Add all pipes to the state */
12939 for_each_crtc(state->dev, crtc) {
12940 struct drm_crtc_state *crtc_state;
12941
12942 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12943 if (IS_ERR(crtc_state))
12944 return PTR_ERR(crtc_state);
12945 }
12946
12947 return 0;
12948 }
12949
12950 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12951 {
12952 struct drm_crtc *crtc;
12953
12954 /*
12955 * Add all pipes to the state, and force
12956 * a modeset on all the active ones.
12957 */
12958 for_each_crtc(state->dev, crtc) {
12959 struct drm_crtc_state *crtc_state;
12960 int ret;
12961
12962 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12963 if (IS_ERR(crtc_state))
12964 return PTR_ERR(crtc_state);
12965
12966 if (!crtc_state->active || needs_modeset(crtc_state))
12967 continue;
12968
12969 crtc_state->mode_changed = true;
12970
12971 ret = drm_atomic_add_affected_connectors(state, crtc);
12972 if (ret)
12973 return ret;
12974
12975 ret = drm_atomic_add_affected_planes(state, crtc);
12976 if (ret)
12977 return ret;
12978 }
12979
12980 return 0;
12981 }
12982
12983 static int intel_modeset_checks(struct drm_atomic_state *state)
12984 {
12985 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12986 struct drm_i915_private *dev_priv = to_i915(state->dev);
12987 struct drm_crtc *crtc;
12988 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12989 int ret = 0, i;
12990
12991 if (!check_digital_port_conflicts(state)) {
12992 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
12993 return -EINVAL;
12994 }
12995
12996 /* keep the current setting */
12997 if (!intel_state->cdclk.force_min_cdclk_changed)
12998 intel_state->cdclk.force_min_cdclk =
12999 dev_priv->cdclk.force_min_cdclk;
13000
13001 intel_state->modeset = true;
13002 intel_state->active_crtcs = dev_priv->active_crtcs;
13003 intel_state->cdclk.logical = dev_priv->cdclk.logical;
13004 intel_state->cdclk.actual = dev_priv->cdclk.actual;
13005 intel_state->cdclk.pipe = INVALID_PIPE;
13006
13007 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13008 if (new_crtc_state->active)
13009 intel_state->active_crtcs |= 1 << i;
13010 else
13011 intel_state->active_crtcs &= ~(1 << i);
13012
13013 if (old_crtc_state->active != new_crtc_state->active)
13014 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13015 }
13016
13017 /*
13018 * See if the config requires any additional preparation, e.g.
13019 * to adjust global state with pipes off. We need to do this
13020 * here so we can get the modeset_pipe updated config for the new
13021 * mode set on this crtc. For other crtcs we need to use the
13022 * adjusted_mode bits in the crtc directly.
13023 */
13024 if (dev_priv->display.modeset_calc_cdclk) {
13025 enum pipe pipe;
13026
13027 ret = dev_priv->display.modeset_calc_cdclk(state);
13028 if (ret < 0)
13029 return ret;
13030
13031 /*
13032 * Writes to dev_priv->cdclk.logical must be protected by
13033 * holding all the crtc locks, even if we don't end up
13034 * touching the hardware.
13035 */
13036 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13037 &intel_state->cdclk.logical)) {
13038 ret = intel_lock_all_pipes(state);
13039 if (ret < 0)
13040 return ret;
13041 }
13042
13043 if (is_power_of_2(intel_state->active_crtcs)) {
13044 struct drm_crtc *crtc;
13045 struct drm_crtc_state *crtc_state;
13046
13047 pipe = ilog2(intel_state->active_crtcs);
13048 crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
13049 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
13050 if (crtc_state && needs_modeset(crtc_state))
13051 pipe = INVALID_PIPE;
13052 } else {
13053 pipe = INVALID_PIPE;
13054 }
13055
13056 /* All pipes must be switched off while we change the cdclk. */
13057 if (pipe != INVALID_PIPE &&
13058 intel_cdclk_needs_cd2x_update(dev_priv,
13059 &dev_priv->cdclk.actual,
13060 &intel_state->cdclk.actual)) {
13061 ret = intel_lock_all_pipes(state);
13062 if (ret < 0)
13063 return ret;
13064
13065 intel_state->cdclk.pipe = pipe;
13066 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13067 &intel_state->cdclk.actual)) {
13068 ret = intel_modeset_all_pipes(state);
13069 if (ret < 0)
13070 return ret;
13071
13072 intel_state->cdclk.pipe = INVALID_PIPE;
13073 }
13074
13075 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13076 intel_state->cdclk.logical.cdclk,
13077 intel_state->cdclk.actual.cdclk);
13078 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13079 intel_state->cdclk.logical.voltage_level,
13080 intel_state->cdclk.actual.voltage_level);
13081 }
13082
13083 intel_modeset_clear_plls(state);
13084
13085 if (IS_HASWELL(dev_priv))
13086 return haswell_mode_set_planes_workaround(state);
13087
13088 return 0;
13089 }
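
/*
 * Worked example for the cd2x pipe selection above, with an assumed crtc
 * mask: is_power_of_2(active_crtcs) is true exactly when a single crtc is
 * active, and ilog2() recovers its pipe. E.g. active_crtcs == 0x4
 * (binary 100) gives ilog2(0x4) == 2, i.e. pipe C, so the cdclk can be
 * changed with a cd2x divider update against pipe C alone instead of
 * forcing a modeset on every pipe.
 */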
13090
13091 /*
13092 * Handle calculation of various watermark data at the end of the atomic check
13093 * phase. The code here should be run after the per-crtc and per-plane 'check'
13094 * handlers to ensure that all derived state has been updated.
13095 */
13096 static int calc_watermark_data(struct intel_atomic_state *state)
13097 {
13098 struct drm_device *dev = state->base.dev;
13099 struct drm_i915_private *dev_priv = to_i915(dev);
13100
13101 /* Is there platform-specific watermark information to calculate? */
13102 if (dev_priv->display.compute_global_watermarks)
13103 return dev_priv->display.compute_global_watermarks(state);
13104
13105 return 0;
13106 }
13107
13108 /**
13109 * intel_atomic_check - validate state object
13110 * @dev: drm device
13111 * @state: state to validate
13112 */
13113 static int intel_atomic_check(struct drm_device *dev,
13114 struct drm_atomic_state *state)
13115 {
13116 struct drm_i915_private *dev_priv = to_i915(dev);
13117 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13118 struct drm_crtc *crtc;
13119 struct drm_crtc_state *old_crtc_state, *crtc_state;
13120 int ret, i;
13121 bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
13122
13123 /* Catch I915_MODE_FLAG_INHERITED */
13124 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
13125 crtc_state, i) {
13126 if (crtc_state->mode.private_flags !=
13127 old_crtc_state->mode.private_flags)
13128 crtc_state->mode_changed = true;
13129 }
13130
13131 ret = drm_atomic_helper_check_modeset(dev, state);
13132 if (ret)
13133 return ret;
13134
13135 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
13136 struct intel_crtc_state *pipe_config =
13137 to_intel_crtc_state(crtc_state);
13138
13139 if (!needs_modeset(crtc_state))
13140 continue;
13141
13142 if (!crtc_state->enable) {
13143 any_ms = true;
13144 continue;
13145 }
13146
13147 ret = intel_modeset_pipe_config(crtc, pipe_config);
13148 if (ret == -EDEADLK)
13149 return ret;
13150 if (ret) {
13151 intel_dump_pipe_config(to_intel_crtc(crtc),
13152 pipe_config, "[failed]");
13153 return ret;
13154 }
13155
13156 if (intel_pipe_config_compare(dev_priv,
13157 to_intel_crtc_state(old_crtc_state),
13158 pipe_config, true)) {
13159 crtc_state->mode_changed = false;
13160 pipe_config->update_pipe = true;
13161 }
13162
13163 if (needs_modeset(crtc_state))
13164 any_ms = true;
13165
13166 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13167 needs_modeset(crtc_state) ?
13168 "[modeset]" : "[fastset]");
13169 }
13170
13171 ret = drm_dp_mst_atomic_check(state);
13172 if (ret)
13173 return ret;
13174
13175 if (any_ms) {
13176 ret = intel_modeset_checks(state);
13177
13178 if (ret)
13179 return ret;
13180 } else {
13181 intel_state->cdclk.logical = dev_priv->cdclk.logical;
13182 }
13183
13184 ret = icl_add_linked_planes(intel_state);
13185 if (ret)
13186 return ret;
13187
13188 ret = drm_atomic_helper_check_planes(dev, state);
13189 if (ret)
13190 return ret;
13191
13192 intel_fbc_choose_crtc(dev_priv, intel_state);
13193 return calc_watermark_data(intel_state);
13194 }
13195
13196 static int intel_atomic_prepare_commit(struct drm_device *dev,
13197 struct drm_atomic_state *state)
13198 {
13199 return drm_atomic_helper_prepare_planes(dev, state);
13200 }
13201
13202 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13203 {
13204 struct drm_device *dev = crtc->base.dev;
13205 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13206
13207 if (!vblank->max_vblank_count)
13208 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13209
13210 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13211 }
13212
13213 static void intel_update_crtc(struct drm_crtc *crtc,
13214 struct drm_atomic_state *state,
13215 struct drm_crtc_state *old_crtc_state,
13216 struct drm_crtc_state *new_crtc_state)
13217 {
13218 struct drm_device *dev = crtc->dev;
13219 struct drm_i915_private *dev_priv = to_i915(dev);
13220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13221 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13222 bool modeset = needs_modeset(new_crtc_state);
13223 struct intel_plane_state *new_plane_state =
13224 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13225 to_intel_plane(crtc->primary));
13226
13227 if (modeset) {
13228 update_scanline_offset(pipe_config);
13229 dev_priv->display.crtc_enable(pipe_config, state);
13230
13231 /* vblanks work again, re-enable pipe CRC. */
13232 intel_crtc_enable_pipe_crc(intel_crtc);
13233 } else {
13234 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13235 pipe_config);
13236
13237 if (pipe_config->update_pipe)
13238 intel_encoders_update_pipe(crtc, pipe_config, state);
13239 }
13240
13241 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13242 intel_fbc_disable(intel_crtc);
13243 else if (new_plane_state)
13244 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
13245
13246 intel_begin_crtc_commit(crtc, old_crtc_state);
13247
13248 if (INTEL_GEN(dev_priv) >= 9)
13249 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13250 else
13251 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13252
13253 intel_finish_crtc_commit(crtc, old_crtc_state);
13254 }
13255
13256 static void intel_update_crtcs(struct drm_atomic_state *state)
13257 {
13258 struct drm_crtc *crtc;
13259 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13260 int i;
13261
13262 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13263 if (!new_crtc_state->active)
13264 continue;
13265
13266 intel_update_crtc(crtc, state, old_crtc_state,
13267 new_crtc_state);
13268 }
13269 }
13270
13271 static void skl_update_crtcs(struct drm_atomic_state *state)
13272 {
13273 struct drm_i915_private *dev_priv = to_i915(state->dev);
13274 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13275 struct drm_crtc *crtc;
13276 struct intel_crtc *intel_crtc;
13277 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13278 struct intel_crtc_state *cstate;
13279 unsigned int updated = 0;
13280 bool progress;
13281 enum pipe pipe;
13282 int i;
13283 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13284 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13285 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13286
13287 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13288 /* ignore allocations for crtc's that have been turned off. */
13289 if (new_crtc_state->active)
13290 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13291
13292 /* If the 2nd DBuf slice is required, enable it here */
13293 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13294 icl_dbuf_slices_update(dev_priv, required_slices);
13295
13296 /*
13297 * Whenever the number of active pipes changes, we need to make sure we
13298 * update the pipes in the right order so that their ddb allocations
13299 * never overlap with each other in between CRTC updates. Otherwise we'll
13300 * cause pipe underruns and other bad stuff.
13301 */
13302 do {
13303 progress = false;
13304
13305 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13306 bool vbl_wait = false;
13307 unsigned int cmask = drm_crtc_mask(crtc);
13308
13309 intel_crtc = to_intel_crtc(crtc);
13310 cstate = to_intel_crtc_state(new_crtc_state);
13311 pipe = intel_crtc->pipe;
13312
13313 if (updated & cmask || !cstate->base.active)
13314 continue;
13315
13316 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13317 entries,
13318 INTEL_INFO(dev_priv)->num_pipes, i))
13319 continue;
13320
13321 updated |= cmask;
13322 entries[i] = cstate->wm.skl.ddb;
13323
13324 /*
13325 * If this is an already active pipe, its DDB changed,
13326 * and this isn't the last pipe that needs updating
13327 * then we need to wait for a vblank to pass for the
13328 * new ddb allocation to take effect.
13329 */
13330 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13331 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13332 !new_crtc_state->active_changed &&
13333 intel_state->wm_results.dirty_pipes != updated)
13334 vbl_wait = true;
13335
13336 intel_update_crtc(crtc, state, old_crtc_state,
13337 new_crtc_state);
13338
13339 if (vbl_wait)
13340 intel_wait_for_vblank(dev_priv, pipe);
13341
13342 progress = true;
13343 }
13344 } while (progress);
13345
13346 /* If the 2nd DBuf slice is no longer required, disable it */
13347 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13348 icl_dbuf_slices_update(dev_priv, required_slices);
13349 }
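
/*
 * Illustrative sketch (not part of the driver): the ordering loop above
 * hinges on an interval-overlap test between one pipe's new DDB
 * allocation and the entries the other pipes still occupy, in the spirit
 * of skl_ddb_allocation_overlaps(). The helper name below is made up for
 * the example.
 */
static bool __maybe_unused example_ddb_entries_overlap(const struct skl_ddb_entry *a,
						       const struct skl_ddb_entry *b)
{
	/* half-open [start, end) ranges overlap unless fully disjoint */
	return a->start < b->end && b->start < a->end;
}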
13350
13351 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13352 {
13353 struct intel_atomic_state *state, *next;
13354 struct llist_node *freed;
13355
13356 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13357 llist_for_each_entry_safe(state, next, freed, freed)
13358 drm_atomic_state_put(&state->base);
13359 }
13360
13361 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13362 {
13363 struct drm_i915_private *dev_priv =
13364 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13365
13366 intel_atomic_helper_free_state(dev_priv);
13367 }
13368
13369 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13370 {
13371 struct wait_queue_entry wait_fence, wait_reset;
13372 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13373
13374 init_wait_entry(&wait_fence, 0);
13375 init_wait_entry(&wait_reset, 0);
13376 for (;;) {
13377 prepare_to_wait(&intel_state->commit_ready.wait,
13378 &wait_fence, TASK_UNINTERRUPTIBLE);
13379 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13380 &wait_reset, TASK_UNINTERRUPTIBLE);
13381
13382
12383 if (i915_sw_fence_done(&intel_state->commit_ready) ||
12384 test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13385 break;
13386
13387 schedule();
13388 }
13389 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13390 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13391 }
13392
13393 static void intel_atomic_cleanup_work(struct work_struct *work)
13394 {
13395 struct drm_atomic_state *state =
13396 container_of(work, struct drm_atomic_state, commit_work);
13397 struct drm_i915_private *i915 = to_i915(state->dev);
13398
13399 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13400 drm_atomic_helper_commit_cleanup_done(state);
13401 drm_atomic_state_put(state);
13402
13403 intel_atomic_helper_free_state(i915);
13404 }
13405
13406 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13407 {
13408 struct drm_device *dev = state->dev;
13409 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13410 struct drm_i915_private *dev_priv = to_i915(dev);
13411 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13412 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13413 struct drm_crtc *crtc;
13414 struct intel_crtc *intel_crtc;
13415 u64 put_domains[I915_MAX_PIPES] = {};
13416 intel_wakeref_t wakeref = 0;
13417 int i;
13418
13419 intel_atomic_commit_fence_wait(intel_state);
13420
13421 drm_atomic_helper_wait_for_dependencies(state);
13422
13423 if (intel_state->modeset)
13424 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13425
13426 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13427 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13428 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13429 intel_crtc = to_intel_crtc(crtc);
13430
13431 if (needs_modeset(new_crtc_state) ||
13432 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13433
13434 put_domains[intel_crtc->pipe] =
13435 modeset_get_crtc_power_domains(crtc,
13436 new_intel_crtc_state);
13437 }
13438
13439 if (!needs_modeset(new_crtc_state))
13440 continue;
13441
13442 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13443
13444 if (old_crtc_state->active) {
13445 intel_crtc_disable_planes(intel_state, intel_crtc);
13446
13447 /*
13448 * We need to disable pipe CRC before disabling the pipe,
13449 * or we race against vblank off.
13450 */
13451 intel_crtc_disable_pipe_crc(intel_crtc);
13452
13453 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13454 intel_crtc->active = false;
13455 intel_fbc_disable(intel_crtc);
13456 intel_disable_shared_dpll(old_intel_crtc_state);
13457
13458 /*
13459 * Underruns don't always raise
13460 * interrupts, so check manually.
13461 */
13462 intel_check_cpu_fifo_underruns(dev_priv);
13463 intel_check_pch_fifo_underruns(dev_priv);
13464
13465 /* FIXME unify this for all platforms */
13466 if (!new_crtc_state->active &&
13467 !HAS_GMCH(dev_priv) &&
13468 dev_priv->display.initial_watermarks)
13469 dev_priv->display.initial_watermarks(intel_state,
13470 new_intel_crtc_state);
13471 }
13472 }
13473
13474 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13475 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13476 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13477
13478 if (intel_state->modeset) {
13479 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13480
13481 intel_set_cdclk_pre_plane_update(dev_priv,
13482 &intel_state->cdclk.actual,
13483 &dev_priv->cdclk.actual,
13484 intel_state->cdclk.pipe);
13485
13486 /*
13487 * SKL workaround: bspec recommends we disable the SAGV when we
13488 * have more than one pipe enabled.
13489 */
13490 if (!intel_can_enable_sagv(state))
13491 intel_disable_sagv(dev_priv);
13492
13493 intel_modeset_verify_disabled(dev, state);
13494 }
13495
13496 /* Complete the events for pipes that have now been disabled */
13497 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13498 bool modeset = needs_modeset(new_crtc_state);
13499
13500 /* Complete events for now disabled pipes here. */
13501 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13502 spin_lock_irq(&dev->event_lock);
13503 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13504 spin_unlock_irq(&dev->event_lock);
13505
13506 new_crtc_state->event = NULL;
13507 }
13508 }
13509
13510 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13511 dev_priv->display.update_crtcs(state);
13512
13513 if (intel_state->modeset)
13514 intel_set_cdclk_post_plane_update(dev_priv,
13515 &intel_state->cdclk.actual,
13516 &dev_priv->cdclk.actual,
13517 intel_state->cdclk.pipe);
13518
13519 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13520 * already, but still need the state for the delayed optimization. To
13521 * fix this:
13522 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13523 * - schedule that vblank worker _before_ calling hw_done
13524 * - at the start of commit_tail, cancel it _synchronously_
13525 * - switch over to the vblank wait helper in the core after that since
13526 * we don't need our special handling any more.
13527 */
13528 drm_atomic_helper_wait_for_flip_done(dev, state);
13529
13530 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13531 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13532
13533 if (new_crtc_state->active &&
13534 !needs_modeset(new_crtc_state) &&
13535 (new_intel_crtc_state->base.color_mgmt_changed ||
13536 new_intel_crtc_state->update_pipe))
13537 intel_color_load_luts(new_intel_crtc_state);
13538 }
13539
13540 /*
13541 * Now that the vblank has passed, we can go ahead and program the
13542 * optimal watermarks on platforms that need two-step watermark
13543 * programming.
13544 *
13545 * TODO: Move this (and other cleanup) to an async worker eventually.
13546 */
13547 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13548 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13549
13550 if (dev_priv->display.optimize_watermarks)
13551 dev_priv->display.optimize_watermarks(intel_state,
13552 new_intel_crtc_state);
13553 }
13554
13555 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13556 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13557
13558 if (put_domains[i])
13559 modeset_put_power_domains(dev_priv, put_domains[i]);
13560
13561 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13562 }
13563
13564 if (intel_state->modeset)
13565 intel_verify_planes(intel_state);
13566
13567 if (intel_state->modeset && intel_can_enable_sagv(state))
13568 intel_enable_sagv(dev_priv);
13569
13570 drm_atomic_helper_commit_hw_done(state);
13571
13572 if (intel_state->modeset) {
13573 /* As one of the primary mmio accessors, KMS has a high
13574 * likelihood of triggering bugs in unclaimed access. After we
13575 * finish modesetting, see if an error has been flagged, and if
13576 * so enable debugging for the next modeset - and hope we catch
13577 * the culprit.
13578 */
13579 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13580 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13581 }
13582
13583 /*
13584 * Defer the cleanup of the old state to a separate worker to not
13585 * impede the current task (userspace for blocking modesets) that
13586 * is executed inline. For out-of-line asynchronous modesets/flips,
13587 * deferring to a new worker seems overkill, but we would place a
13588 * schedule point (cond_resched()) here anyway to keep latencies
13589 * down.
13590 */
13591 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13592 queue_work(system_highpri_wq, &state->commit_work);
13593 }
13594
13595 static void intel_atomic_commit_work(struct work_struct *work)
13596 {
13597 struct drm_atomic_state *state =
13598 container_of(work, struct drm_atomic_state, commit_work);
13599
13600 intel_atomic_commit_tail(state);
13601 }
13602
13603 static int __i915_sw_fence_call
13604 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13605 enum i915_sw_fence_notify notify)
13606 {
13607 struct intel_atomic_state *state =
13608 container_of(fence, struct intel_atomic_state, commit_ready);
13609
13610 switch (notify) {
13611 case FENCE_COMPLETE:
13612 /* we do blocking waits in the worker, nothing to do here */
13613 break;
13614 case FENCE_FREE:
13615 {
13616 struct intel_atomic_helper *helper =
13617 &to_i915(state->base.dev)->atomic_helper;
13618
13619 if (llist_add(&state->freed, &helper->free_list))
13620 schedule_work(&helper->free_work);
13621 break;
13622 }
13623 }
13624
13625 return NOTIFY_DONE;
13626 }
13627
13628 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13629 {
13630 struct drm_plane_state *old_plane_state, *new_plane_state;
13631 struct drm_plane *plane;
13632 int i;
13633
13634 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13635 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13636 intel_fb_obj(new_plane_state->fb),
13637 to_intel_plane(plane)->frontbuffer_bit);
13638 }
13639
13640 /**
13641 * intel_atomic_commit - commit validated state object
13642 * @dev: DRM device
13643 * @state: the top-level driver state object
13644 * @nonblock: nonblocking commit
13645 *
13646 * This function commits a top-level state object that has been validated
13647 * with drm_atomic_helper_check().
13648 *
13649 * Returns:
13650 * Zero for success or -errno.
13651 */
13652 static int intel_atomic_commit(struct drm_device *dev,
13653 struct drm_atomic_state *state,
13654 bool nonblock)
13655 {
13656 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13657 struct drm_i915_private *dev_priv = to_i915(dev);
13658 int ret = 0;
13659
13660 drm_atomic_state_get(state);
13661 i915_sw_fence_init(&intel_state->commit_ready,
13662 intel_atomic_commit_ready);
13663
13664 /*
13665 * The intel_legacy_cursor_update() fast path takes care
13666 * of avoiding the vblank waits for simple cursor
13667 * movement and flips. For cursor on/off and size changes,
13668 * we want to perform the vblank waits so that watermark
13669 * updates happen during the correct frames. Gen9+ have
13670 * double buffered watermarks and so shouldn't need this.
13671 *
13672 * Unset state->legacy_cursor_update before the call to
13673 * drm_atomic_helper_setup_commit() because otherwise
13674 * drm_atomic_helper_wait_for_flip_done() is a noop and
13675 * we get FIFO underruns because we didn't wait
13676 * for vblank.
13677 *
13678 * FIXME doing watermarks and fb cleanup from a vblank worker
13679 * (assuming we had any) would solve these problems.
13680 */
13681 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13682 struct intel_crtc_state *new_crtc_state;
13683 struct intel_crtc *crtc;
13684 int i;
13685
13686 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13687 if (new_crtc_state->wm.need_postvbl_update ||
13688 new_crtc_state->update_wm_post)
13689 state->legacy_cursor_update = false;
13690 }
13691
13692 ret = intel_atomic_prepare_commit(dev, state);
13693 if (ret) {
13694 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13695 i915_sw_fence_commit(&intel_state->commit_ready);
13696 return ret;
13697 }
13698
13699 ret = drm_atomic_helper_setup_commit(state, nonblock);
13700 if (!ret)
13701 ret = drm_atomic_helper_swap_state(state, true);
13702
13703 if (ret) {
13704 i915_sw_fence_commit(&intel_state->commit_ready);
13705
13706 drm_atomic_helper_cleanup_planes(dev, state);
13707 return ret;
13708 }
13709 dev_priv->wm.distrust_bios_wm = false;
13710 intel_shared_dpll_swap_state(state);
13711 intel_atomic_track_fbs(state);
13712
13713 if (intel_state->modeset) {
13714 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13715 sizeof(intel_state->min_cdclk));
13716 memcpy(dev_priv->min_voltage_level,
13717 intel_state->min_voltage_level,
13718 sizeof(intel_state->min_voltage_level));
13719 dev_priv->active_crtcs = intel_state->active_crtcs;
13720 dev_priv->cdclk.force_min_cdclk =
13721 intel_state->cdclk.force_min_cdclk;
13722
13723 intel_cdclk_swap_state(intel_state);
13724 }
13725
13726 drm_atomic_state_get(state);
13727 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13728
13729 i915_sw_fence_commit(&intel_state->commit_ready);
13730 if (nonblock && intel_state->modeset) {
13731 queue_work(dev_priv->modeset_wq, &state->commit_work);
13732 } else if (nonblock) {
13733 queue_work(system_unbound_wq, &state->commit_work);
13734 } else {
13735 if (intel_state->modeset)
13736 flush_workqueue(dev_priv->modeset_wq);
13737 intel_atomic_commit_tail(state);
13738 }
13739
13740 return 0;
13741 }
13742
13743 static const struct drm_crtc_funcs intel_crtc_funcs = {
13744 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13745 .set_config = drm_atomic_helper_set_config,
13746 .destroy = intel_crtc_destroy,
13747 .page_flip = drm_atomic_helper_page_flip,
13748 .atomic_duplicate_state = intel_crtc_duplicate_state,
13749 .atomic_destroy_state = intel_crtc_destroy_state,
13750 .set_crc_source = intel_crtc_set_crc_source,
13751 .verify_crc_source = intel_crtc_verify_crc_source,
13752 .get_crc_sources = intel_crtc_get_crc_sources,
13753 };
13754
13755 struct wait_rps_boost {
13756 struct wait_queue_entry wait;
13757
13758 struct drm_crtc *crtc;
13759 struct i915_request *request;
13760 };
13761
13762 static int do_rps_boost(struct wait_queue_entry *_wait,
13763 unsigned mode, int sync, void *key)
13764 {
13765 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
13766 struct i915_request *rq = wait->request;
13767
13768 /*
13769 * If we missed the vblank, but the request is already running it
13770 * is reasonable to assume that it will complete before the next
13771 * vblank without our intervention, so leave RPS alone.
13772 */
13773 if (!i915_request_started(rq))
13774 gen6_rps_boost(rq);
13775 i915_request_put(rq);
13776
13777 drm_crtc_vblank_put(wait->crtc);
13778
13779 list_del(&wait->wait.entry);
13780 kfree(wait);
13781 return 1;
13782 }
13783
13784 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13785 struct dma_fence *fence)
13786 {
13787 struct wait_rps_boost *wait;
13788
13789 if (!dma_fence_is_i915(fence))
13790 return;
13791
13792 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13793 return;
13794
13795 if (drm_crtc_vblank_get(crtc))
13796 return;
13797
13798 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13799 if (!wait) {
13800 drm_crtc_vblank_put(crtc);
13801 return;
13802 }
13803
13804 wait->request = to_request(dma_fence_get(fence));
13805 wait->crtc = crtc;
13806
13807 wait->wait.func = do_rps_boost;
13808 wait->wait.flags = 0;
13809
13810 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13811 }
13812
13813 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13814 {
13815 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13816 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13817 struct drm_framebuffer *fb = plane_state->base.fb;
13818 struct i915_vma *vma;
13819
13820 if (plane->id == PLANE_CURSOR &&
13821 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
13822 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13823 const int align = intel_cursor_alignment(dev_priv);
13824 int err;
13825
13826 err = i915_gem_object_attach_phys(obj, align);
13827 if (err)
13828 return err;
13829 }
13830
13831 vma = intel_pin_and_fence_fb_obj(fb,
13832 &plane_state->view,
13833 intel_plane_uses_fence(plane_state),
13834 &plane_state->flags);
13835 if (IS_ERR(vma))
13836 return PTR_ERR(vma);
13837
13838 plane_state->vma = vma;
13839
13840 return 0;
13841 }
13842
13843 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13844 {
13845 struct i915_vma *vma;
13846
13847 vma = fetch_and_zero(&old_plane_state->vma);
13848 if (vma)
13849 intel_unpin_fb_vma(vma, old_plane_state->flags);
13850 }
13851
13852 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
13853 {
13854 struct i915_sched_attr attr = {
13855 .priority = I915_PRIORITY_DISPLAY,
13856 };
13857
13858 i915_gem_object_wait_priority(obj, 0, &attr);
13859 }
13860
13861 /**
13862 * intel_prepare_plane_fb - Prepare fb for usage on plane
13863 * @plane: drm plane to prepare for
13864 * @new_state: the plane state being prepared
13865 *
13866 * Prepares a framebuffer for usage on a display plane. Generally this
13867 * involves pinning the underlying object and updating the frontbuffer tracking
13868 * bits. Some older platforms need special physical address handling for
13869 * cursor planes.
13870 *
13871 * Must be called with struct_mutex held.
13872 *
13873 * Returns 0 on success, negative error code on failure.
13874 */
13875 int
13876 intel_prepare_plane_fb(struct drm_plane *plane,
13877 struct drm_plane_state *new_state)
13878 {
13879 struct intel_atomic_state *intel_state =
13880 to_intel_atomic_state(new_state->state);
13881 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13882 struct drm_framebuffer *fb = new_state->fb;
13883 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13884 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13885 int ret;
13886
13887 if (old_obj) {
13888 struct drm_crtc_state *crtc_state =
13889 drm_atomic_get_new_crtc_state(new_state->state,
13890 plane->state->crtc);
13891
13892 /* Big Hammer, we also need to ensure that any pending
13893 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13894 * current scanout is retired before unpinning the old
13895 * framebuffer. Note that we rely on userspace rendering
13896 * into the buffer attached to the pipe they are waiting
13897 * on. If not, userspace generates a GPU hang with IPEHR
13898 * pointing to the MI_WAIT_FOR_EVENT.
13899 *
13900 * This should only fail upon a hung GPU, in which case we
13901 * can safely continue.
13902 */
13903 if (needs_modeset(crtc_state)) {
13904 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13905 old_obj->resv, NULL,
13906 false, 0,
13907 GFP_KERNEL);
13908 if (ret < 0)
13909 return ret;
13910 }
13911 }
13912
13913 if (new_state->fence) { /* explicit fencing */
13914 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13915 new_state->fence,
13916 I915_FENCE_TIMEOUT,
13917 GFP_KERNEL);
13918 if (ret < 0)
13919 return ret;
13920 }
13921
13922 if (!obj)
13923 return 0;
13924
13925 ret = i915_gem_object_pin_pages(obj);
13926 if (ret)
13927 return ret;
13928
13929 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13930 if (ret) {
13931 i915_gem_object_unpin_pages(obj);
13932 return ret;
13933 }
13934
13935 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13936
13937 mutex_unlock(&dev_priv->drm.struct_mutex);
13938 i915_gem_object_unpin_pages(obj);
13939 if (ret)
13940 return ret;
13941
13942 fb_obj_bump_render_priority(obj);
13943 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13944
13945 if (!new_state->fence) { /* implicit fencing */
13946 struct dma_fence *fence;
13947
13948 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13949 obj->resv, NULL,
13950 false, I915_FENCE_TIMEOUT,
13951 GFP_KERNEL);
13952 if (ret < 0)
13953 return ret;
13954
13955 fence = reservation_object_get_excl_rcu(obj->resv);
13956 if (fence) {
13957 add_rps_boost_after_vblank(new_state->crtc, fence);
13958 dma_fence_put(fence);
13959 }
13960 } else {
13961 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
13962 }
13963
13964 /*
13965 * We declare pageflips to be interactive and so merit a small bias
13966 * towards upclocking to deliver the frame on time. By only changing
13967 * the RPS thresholds to sample more regularly and aim for higher
13968 * clocks we can hopefully deliver low power workloads (like kodi)
13969 * that are not quite steady state without resorting to forcing
13970 * maximum clocks following a vblank miss (see do_rps_boost()).
13971 */
13972 if (!intel_state->rps_interactive) {
13973 intel_rps_mark_interactive(dev_priv, true);
13974 intel_state->rps_interactive = true;
13975 }
13976
13977 return 0;
13978 }
13979
13980 /**
13981 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13982 * @plane: drm plane to clean up for
13983 * @old_state: the state from the previous modeset
13984 *
13985 * Cleans up a framebuffer that has just been removed from a plane.
13986 *
13987 * Must be called with struct_mutex held.
13988 */
13989 void
13990 intel_cleanup_plane_fb(struct drm_plane *plane,
13991 struct drm_plane_state *old_state)
13992 {
13993 struct intel_atomic_state *intel_state =
13994 to_intel_atomic_state(old_state->state);
13995 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13996
13997 if (intel_state->rps_interactive) {
13998 intel_rps_mark_interactive(dev_priv, false);
13999 intel_state->rps_interactive = false;
14000 }
14001
14002 /* Should only be called after a successful intel_prepare_plane_fb()! */
14003 mutex_lock(&dev_priv->drm.struct_mutex);
14004 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14005 mutex_unlock(&dev_priv->drm.struct_mutex);
14006 }
14007
14008 int
14009 skl_max_scale(const struct intel_crtc_state *crtc_state,
14010 u32 pixel_format)
14011 {
14012 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14013 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14014 int max_scale, mult;
14015 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14016
14017 if (!crtc_state->base.enable)
14018 return DRM_PLANE_HELPER_NO_SCALING;
14019
14020 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14021 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14022
14023 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14024 max_dotclk *= 2;
14025
14026 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14027 return DRM_PLANE_HELPER_NO_SCALING;
14028
14029 /*
14030 * skl max scale is the lower of:
14031 * just under 3.0 in 16.16 fixed point
14032 * (the -1 keeps it strictly below 3), or
14033 * cdclk / crtc_clock
14034 */
14035 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14036 tmpclk1 = (1 << 16) * mult - 1;
14037 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14038 max_scale = min(tmpclk1, tmpclk2);
14039
14040 return max_scale;
14041 }
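
/*
 * Worked example for the 16.16 fixed-point math above, with assumed
 * clocks: for an RGB format mult == 3, so
 * tmpclk1 = 3 * 65536 - 1 = 196607, i.e. just under 3.0. With
 * max_dotclk = 540000 kHz and crtc_clock = 270000 kHz,
 * tmpclk2 = 256 * ((540000 * 256) / 270000) = 256 * 512 = 131072, i.e.
 * exactly 2.0, so the plane is limited to a 2x downscale.
 */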
14042
14043 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14044 struct drm_crtc_state *old_crtc_state)
14045 {
14046 struct drm_device *dev = crtc->dev;
14047 struct drm_i915_private *dev_priv = to_i915(dev);
14048 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14049 struct intel_crtc_state *old_intel_cstate =
14050 to_intel_crtc_state(old_crtc_state);
14051 struct intel_atomic_state *old_intel_state =
14052 to_intel_atomic_state(old_crtc_state->state);
14053 struct intel_crtc_state *intel_cstate =
14054 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
14055 bool modeset = needs_modeset(&intel_cstate->base);
14056
14057 /* Perform vblank evasion around the commit operation */
14058 intel_pipe_update_start(intel_cstate);
14059
14060 if (modeset)
14061 goto out;
14062
14063 if (intel_cstate->base.color_mgmt_changed ||
14064 intel_cstate->update_pipe)
14065 intel_color_commit(intel_cstate);
14066
14067 if (intel_cstate->update_pipe)
14068 intel_update_pipe_config(old_intel_cstate, intel_cstate);
14069 else if (INTEL_GEN(dev_priv) >= 9)
14070 skl_detach_scalers(intel_cstate);
14071
14072 out:
14073 if (dev_priv->display.atomic_update_watermarks)
14074 dev_priv->display.atomic_update_watermarks(old_intel_state,
14075 intel_cstate);
14076 }
14077
14078 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14079 struct intel_crtc_state *crtc_state)
14080 {
14081 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14082
14083 if (!IS_GEN(dev_priv, 2))
14084 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14085
14086 if (crtc_state->has_pch_encoder) {
14087 enum pipe pch_transcoder =
14088 intel_crtc_pch_transcoder(crtc);
14089
14090 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14091 }
14092 }
14093
14094 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14095 struct drm_crtc_state *old_crtc_state)
14096 {
14097 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14098 struct intel_atomic_state *old_intel_state =
14099 to_intel_atomic_state(old_crtc_state->state);
14100 struct intel_crtc_state *new_crtc_state =
14101 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
14102
14103 intel_pipe_update_end(new_crtc_state);
14104
14105 if (new_crtc_state->update_pipe &&
14106 !needs_modeset(&new_crtc_state->base) &&
14107 old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
14108 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
14109 }
14110
14111 /**
14112 * intel_plane_destroy - destroy a plane
14113 * @plane: plane to destroy
14114 *
14115 * Common destruction function for all types of planes (primary, cursor,
14116 * sprite).
14117 */
14118 void intel_plane_destroy(struct drm_plane *plane)
14119 {
14120 drm_plane_cleanup(plane);
14121 kfree(to_intel_plane(plane));
14122 }
14123
14124 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14125 u32 format, u64 modifier)
14126 {
14127 switch (modifier) {
14128 case DRM_FORMAT_MOD_LINEAR:
14129 case I915_FORMAT_MOD_X_TILED:
14130 break;
14131 default:
14132 return false;
14133 }
14134
14135 switch (format) {
14136 case DRM_FORMAT_C8:
14137 case DRM_FORMAT_RGB565:
14138 case DRM_FORMAT_XRGB1555:
14139 case DRM_FORMAT_XRGB8888:
14140 return modifier == DRM_FORMAT_MOD_LINEAR ||
14141 modifier == I915_FORMAT_MOD_X_TILED;
14142 default:
14143 return false;
14144 }
14145 }
14146
14147 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14148 u32 format, u64 modifier)
14149 {
14150 switch (modifier) {
14151 case DRM_FORMAT_MOD_LINEAR:
14152 case I915_FORMAT_MOD_X_TILED:
14153 break;
14154 default:
14155 return false;
14156 }
14157
14158 switch (format) {
14159 case DRM_FORMAT_C8:
14160 case DRM_FORMAT_RGB565:
14161 case DRM_FORMAT_XRGB8888:
14162 case DRM_FORMAT_XBGR8888:
14163 case DRM_FORMAT_XRGB2101010:
14164 case DRM_FORMAT_XBGR2101010:
14165 return modifier == DRM_FORMAT_MOD_LINEAR ||
14166 modifier == I915_FORMAT_MOD_X_TILED;
14167 default:
14168 return false;
14169 }
14170 }
14171
14172 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14173 u32 format, u64 modifier)
14174 {
14175 return modifier == DRM_FORMAT_MOD_LINEAR &&
14176 format == DRM_FORMAT_ARGB8888;
14177 }
14178
14179 static const struct drm_plane_funcs i965_plane_funcs = {
14180 .update_plane = drm_atomic_helper_update_plane,
14181 .disable_plane = drm_atomic_helper_disable_plane,
14182 .destroy = intel_plane_destroy,
14183 .atomic_get_property = intel_plane_atomic_get_property,
14184 .atomic_set_property = intel_plane_atomic_set_property,
14185 .atomic_duplicate_state = intel_plane_duplicate_state,
14186 .atomic_destroy_state = intel_plane_destroy_state,
14187 .format_mod_supported = i965_plane_format_mod_supported,
14188 };
14189
14190 static const struct drm_plane_funcs i8xx_plane_funcs = {
14191 .update_plane = drm_atomic_helper_update_plane,
14192 .disable_plane = drm_atomic_helper_disable_plane,
14193 .destroy = intel_plane_destroy,
14194 .atomic_get_property = intel_plane_atomic_get_property,
14195 .atomic_set_property = intel_plane_atomic_set_property,
14196 .atomic_duplicate_state = intel_plane_duplicate_state,
14197 .atomic_destroy_state = intel_plane_destroy_state,
14198 .format_mod_supported = i8xx_plane_format_mod_supported,
14199 };
14200
14201 static int
14202 intel_legacy_cursor_update(struct drm_plane *plane,
14203 struct drm_crtc *crtc,
14204 struct drm_framebuffer *fb,
14205 int crtc_x, int crtc_y,
14206 unsigned int crtc_w, unsigned int crtc_h,
14207 u32 src_x, u32 src_y,
14208 u32 src_w, u32 src_h,
14209 struct drm_modeset_acquire_ctx *ctx)
14210 {
14211 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14212 int ret;
14213 struct drm_plane_state *old_plane_state, *new_plane_state;
14214 struct intel_plane *intel_plane = to_intel_plane(plane);
14215 struct drm_framebuffer *old_fb;
14216 struct intel_crtc_state *crtc_state =
14217 to_intel_crtc_state(crtc->state);
14218 struct intel_crtc_state *new_crtc_state;
14219
14220 /*
14221 * When the crtc is inactive or a modeset is pending,
14222 * wait for it to complete in the slowpath.
14223 */
14224 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14225 crtc_state->update_pipe)
14226 goto slow;
14227
14228 old_plane_state = plane->state;
14229 /*
14230 * Don't do an async update if there is an outstanding commit modifying
14231 * the plane. This prevents our async update's changes from getting
14232 * overridden by a previous synchronous update's state.
14233 */
14234 if (old_plane_state->commit &&
14235 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14236 goto slow;
14237
14238 /*
14239 * If any parameters change that may affect watermarks,
14240 * take the slowpath. Only changing fb or position should be
14241 * in the fastpath.
14242 */
14243 if (old_plane_state->crtc != crtc ||
14244 old_plane_state->src_w != src_w ||
14245 old_plane_state->src_h != src_h ||
14246 old_plane_state->crtc_w != crtc_w ||
14247 old_plane_state->crtc_h != crtc_h ||
14248 !old_plane_state->fb != !fb)
14249 goto slow;
14250
14251 new_plane_state = intel_plane_duplicate_state(plane);
14252 if (!new_plane_state)
14253 return -ENOMEM;
14254
14255 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14256 if (!new_crtc_state) {
14257 ret = -ENOMEM;
14258 goto out_free;
14259 }
14260
14261 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14262
14263 new_plane_state->src_x = src_x;
14264 new_plane_state->src_y = src_y;
14265 new_plane_state->src_w = src_w;
14266 new_plane_state->src_h = src_h;
14267 new_plane_state->crtc_x = crtc_x;
14268 new_plane_state->crtc_y = crtc_y;
14269 new_plane_state->crtc_w = crtc_w;
14270 new_plane_state->crtc_h = crtc_h;
14271
14272 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14273 to_intel_plane_state(old_plane_state),
14274 to_intel_plane_state(new_plane_state));
14275 if (ret)
14276 goto out_free;
14277
14278 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14279 if (ret)
14280 goto out_free;
14281
14282 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14283 if (ret)
14284 goto out_unlock;
14285
14286 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14287
14288 old_fb = old_plane_state->fb;
14289 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14290 intel_plane->frontbuffer_bit);
14291
14292 /* Swap plane state */
14293 plane->state = new_plane_state;
14294
14295 /*
14296 * We cannot swap crtc_state as it may be in use by an atomic commit or
14297 * page flip that's running simultaneously. If we swap crtc_state and
14298 * destroy the old state, we will cause a use-after-free there.
14299 *
14300 * Only update active_planes, which is needed for our internal
14301 * bookkeeping. Either value will do the right thing when updating
14302 * planes atomically. If the cursor was part of the atomic update then
14303 * we would have taken the slowpath.
14304 */
14305 crtc_state->active_planes = new_crtc_state->active_planes;
14306
14307 if (plane->state->visible)
14308 intel_update_plane(intel_plane, crtc_state,
14309 to_intel_plane_state(plane->state));
14310 else
14311 intel_disable_plane(intel_plane, crtc_state);
14312
14313 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14314
14315 out_unlock:
14316 mutex_unlock(&dev_priv->drm.struct_mutex);
14317 out_free:
14318 if (new_crtc_state)
14319 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14320 if (ret)
14321 intel_plane_destroy_state(plane, new_plane_state);
14322 else
14323 intel_plane_destroy_state(plane, old_plane_state);
14324 return ret;
14325
14326 slow:
14327 return drm_atomic_helper_update_plane(plane, crtc, fb,
14328 crtc_x, crtc_y, crtc_w, crtc_h,
14329 src_x, src_y, src_w, src_h, ctx);
14330 }
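/*
 * In summary, the legacy cursor ioctl above takes the fastpath only
 * when the crtc is active, no modeset or pipe update is pending, no
 * earlier commit on the plane is still in flight, and only the fb
 * and/or position change; everything else falls back to a full
 * atomic commit via drm_atomic_helper_update_plane().
 */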
14331
14332 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14333 .update_plane = intel_legacy_cursor_update,
14334 .disable_plane = drm_atomic_helper_disable_plane,
14335 .destroy = intel_plane_destroy,
14336 .atomic_get_property = intel_plane_atomic_get_property,
14337 .atomic_set_property = intel_plane_atomic_set_property,
14338 .atomic_duplicate_state = intel_plane_duplicate_state,
14339 .atomic_destroy_state = intel_plane_destroy_state,
14340 .format_mod_supported = intel_cursor_format_mod_supported,
14341 };
14342
14343 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14344 enum i9xx_plane_id i9xx_plane)
14345 {
14346 if (!HAS_FBC(dev_priv))
14347 return false;
14348
14349 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14350 return i9xx_plane == PLANE_A; /* tied to pipe A */
14351 else if (IS_IVYBRIDGE(dev_priv))
14352 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14353 i9xx_plane == PLANE_C;
14354 else if (INTEL_GEN(dev_priv) >= 4)
14355 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14356 else
14357 return i9xx_plane == PLANE_A;
14358 }
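/*
 * The mapping above, in table form:
 *
 *   HSW/BDW:     plane A only (tied to pipe A)
 *   IVB:         planes A, B and C
 *   gen4..SNB:   planes A and B
 *   gen2/3:      plane A only
 */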
14359
14360 static struct intel_plane *
14361 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14362 {
14363 struct intel_plane *plane;
14364 const struct drm_plane_funcs *plane_funcs;
14365 unsigned int supported_rotations;
14366 unsigned int possible_crtcs;
14367 const u64 *modifiers;
14368 const u32 *formats;
14369 int num_formats;
14370 int ret;
14371
14372 if (INTEL_GEN(dev_priv) >= 9)
14373 return skl_universal_plane_create(dev_priv, pipe,
14374 PLANE_PRIMARY);
14375
14376 plane = intel_plane_alloc();
14377 if (IS_ERR(plane))
14378 return plane;
14379
14380 plane->pipe = pipe;
14381 /*
14382 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14383 * port are hooked up to pipe B. Hence we want plane A feeding pipe B.
14384 */
14385 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14386 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14387 else
14388 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
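/* e.g. with FBC on gen3: pipe A gets plane B and pipe B gets plane A */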
14389 plane->id = PLANE_PRIMARY;
14390 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14391
14392 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14393 if (plane->has_fbc) {
14394 struct intel_fbc *fbc = &dev_priv->fbc;
14395
14396 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14397 }
14398
14399 if (INTEL_GEN(dev_priv) >= 4) {
14400 formats = i965_primary_formats;
14401 num_formats = ARRAY_SIZE(i965_primary_formats);
14402 modifiers = i9xx_format_modifiers;
14403
14404 plane->max_stride = i9xx_plane_max_stride;
14405 plane->update_plane = i9xx_update_plane;
14406 plane->disable_plane = i9xx_disable_plane;
14407 plane->get_hw_state = i9xx_plane_get_hw_state;
14408 plane->check_plane = i9xx_plane_check;
14409
14410 plane_funcs = &i965_plane_funcs;
14411 } else {
14412 formats = i8xx_primary_formats;
14413 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14414 modifiers = i9xx_format_modifiers;
14415
14416 plane->max_stride = i9xx_plane_max_stride;
14417 plane->update_plane = i9xx_update_plane;
14418 plane->disable_plane = i9xx_disable_plane;
14419 plane->get_hw_state = i9xx_plane_get_hw_state;
14420 plane->check_plane = i9xx_plane_check;
14421
14422 plane_funcs = &i8xx_plane_funcs;
14423 }
14424
14425 possible_crtcs = BIT(pipe);
14426
14427 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14428 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14429 possible_crtcs, plane_funcs,
14430 formats, num_formats, modifiers,
14431 DRM_PLANE_TYPE_PRIMARY,
14432 "primary %c", pipe_name(pipe));
14433 else
14434 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14435 possible_crtcs, plane_funcs,
14436 formats, num_formats, modifiers,
14437 DRM_PLANE_TYPE_PRIMARY,
14438 "plane %c",
14439 plane_name(plane->i9xx_plane));
14440 if (ret)
14441 goto fail;
14442
14443 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14444 supported_rotations =
14445 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14446 DRM_MODE_REFLECT_X;
14447 } else if (INTEL_GEN(dev_priv) >= 4) {
14448 supported_rotations =
14449 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14450 } else {
14451 supported_rotations = DRM_MODE_ROTATE_0;
14452 }
14453
14454 if (INTEL_GEN(dev_priv) >= 4)
14455 drm_plane_create_rotation_property(&plane->base,
14456 DRM_MODE_ROTATE_0,
14457 supported_rotations);
14458
14459 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14460
14461 return plane;
14462
14463 fail:
14464 intel_plane_free(plane);
14465
14466 return ERR_PTR(ret);
14467 }
14468
14469 static struct intel_plane *
14470 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14471 enum pipe pipe)
14472 {
14473 unsigned int possible_crtcs;
14474 struct intel_plane *cursor;
14475 int ret;
14476
14477 cursor = intel_plane_alloc();
14478 if (IS_ERR(cursor))
14479 return cursor;
14480
14481 cursor->pipe = pipe;
14482 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14483 cursor->id = PLANE_CURSOR;
14484 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14485
14486 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14487 cursor->max_stride = i845_cursor_max_stride;
14488 cursor->update_plane = i845_update_cursor;
14489 cursor->disable_plane = i845_disable_cursor;
14490 cursor->get_hw_state = i845_cursor_get_hw_state;
14491 cursor->check_plane = i845_check_cursor;
14492 } else {
14493 cursor->max_stride = i9xx_cursor_max_stride;
14494 cursor->update_plane = i9xx_update_cursor;
14495 cursor->disable_plane = i9xx_disable_cursor;
14496 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14497 cursor->check_plane = i9xx_check_cursor;
14498 }
14499
14500 cursor->cursor.base = ~0;
14501 cursor->cursor.cntl = ~0;
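/*
 * ~0 is an "unknown" sentinel that can never match a real register
 * value, so the first cursor update always programs the hardware
 * instead of being skipped as a redundant write.
 */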
14502
14503 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14504 cursor->cursor.size = ~0;
14505
14506 possible_crtcs = BIT(pipe);
14507
14508 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14509 possible_crtcs, &intel_cursor_plane_funcs,
14510 intel_cursor_formats,
14511 ARRAY_SIZE(intel_cursor_formats),
14512 cursor_format_modifiers,
14513 DRM_PLANE_TYPE_CURSOR,
14514 "cursor %c", pipe_name(pipe));
14515 if (ret)
14516 goto fail;
14517
14518 if (INTEL_GEN(dev_priv) >= 4)
14519 drm_plane_create_rotation_property(&cursor->base,
14520 DRM_MODE_ROTATE_0,
14521 DRM_MODE_ROTATE_0 |
14522 DRM_MODE_ROTATE_180);
14523
14524 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14525
14526 return cursor;
14527
14528 fail:
14529 intel_plane_free(cursor);
14530
14531 return ERR_PTR(ret);
14532 }
14533
14534 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14535 struct intel_crtc_state *crtc_state)
14536 {
14537 struct intel_crtc_scaler_state *scaler_state =
14538 &crtc_state->scaler_state;
14539 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14540 int i;
14541
14542 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14543 if (!crtc->num_scalers)
14544 return;
14545
14546 for (i = 0; i < crtc->num_scalers; i++) {
14547 struct intel_scaler *scaler = &scaler_state->scalers[i];
14548
14549 scaler->in_use = 0;
14550 scaler->mode = 0;
14551 }
14552
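/* -1 == no pipe scaler currently assigned to this crtc */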
14553 scaler_state->scaler_id = -1;
14554 }
14555
14556 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14557 {
14558 struct intel_crtc *intel_crtc;
14559 struct intel_crtc_state *crtc_state = NULL;
14560 struct intel_plane *primary = NULL;
14561 struct intel_plane *cursor = NULL;
14562 int sprite, ret;
14563
14564 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14565 if (!intel_crtc)
14566 return -ENOMEM;
14567
14568 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14569 if (!crtc_state) {
14570 ret = -ENOMEM;
14571 goto fail;
14572 }
14573 intel_crtc->config = crtc_state;
14574 intel_crtc->base.state = &crtc_state->base;
14575 crtc_state->base.crtc = &intel_crtc->base;
14576
14577 primary = intel_primary_plane_create(dev_priv, pipe);
14578 if (IS_ERR(primary)) {
14579 ret = PTR_ERR(primary);
14580 goto fail;
14581 }
14582 intel_crtc->plane_ids_mask |= BIT(primary->id);
14583
14584 for_each_sprite(dev_priv, pipe, sprite) {
14585 struct intel_plane *plane;
14586
14587 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14588 if (IS_ERR(plane)) {
14589 ret = PTR_ERR(plane);
14590 goto fail;
14591 }
14592 intel_crtc->plane_ids_mask |= BIT(plane->id);
14593 }
14594
14595 cursor = intel_cursor_plane_create(dev_priv, pipe);
14596 if (IS_ERR(cursor)) {
14597 ret = PTR_ERR(cursor);
14598 goto fail;
14599 }
14600 intel_crtc->plane_ids_mask |= BIT(cursor->id);
14601
14602 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14603 &primary->base, &cursor->base,
14604 &intel_crtc_funcs,
14605 "pipe %c", pipe_name(pipe));
14606 if (ret)
14607 goto fail;
14608
14609 intel_crtc->pipe = pipe;
14610
14611 /* initialize shared scalers */
14612 intel_crtc_init_scalers(intel_crtc, crtc_state);
14613
14614 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14615 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14616 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14617
14618 if (INTEL_GEN(dev_priv) < 9) {
14619 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14620
14621 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14622 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14623 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14624 }
14625
14626 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14627
14628 intel_color_init(intel_crtc);
14629
14630 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14631
14632 return 0;
14633
14634 fail:
14635 /*
14636 * drm_mode_config_cleanup() will free up any
14637 * crtcs/planes already initialized.
14638 */
14639 kfree(crtc_state);
14640 kfree(intel_crtc);
14641
14642 return ret;
14643 }
14644
14645 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14646 struct drm_file *file)
14647 {
14648 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14649 struct drm_crtc *drmmode_crtc;
14650 struct intel_crtc *crtc;
14651
14652 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14653 if (!drmmode_crtc)
14654 return -ENOENT;
14655
14656 crtc = to_intel_crtc(drmmode_crtc);
14657 pipe_from_crtc_id->pipe = crtc->pipe;
14658
14659 return 0;
14660 }
14661
14662 static int intel_encoder_clones(struct intel_encoder *encoder)
14663 {
14664 struct drm_device *dev = encoder->base.dev;
14665 struct intel_encoder *source_encoder;
14666 int index_mask = 0;
14667 int entry = 0;
14668
14669 for_each_intel_encoder(dev, source_encoder) {
14670 if (encoders_cloneable(encoder, source_encoder))
14671 index_mask |= (1 << entry);
14672
14673 entry++;
14674 }
14675
14676 return index_mask;
14677 }
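/*
 * A small worked example: with four encoders where only the ones at
 * indices 0 and 2 are cloneable with @encoder, the loop above yields
 * index_mask = (1 << 0) | (1 << 2) = 0x5, which ends up in
 * drm_encoder.possible_clones.
 */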
14678
14679 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14680 {
14681 if (!IS_MOBILE(dev_priv))
14682 return false;
14683
14684 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14685 return false;
14686
14687 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14688 return false;
14689
14690 return true;
14691 }
14692
14693 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14694 {
14695 if (INTEL_GEN(dev_priv) >= 9)
14696 return false;
14697
14698 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14699 return false;
14700
14701 if (HAS_PCH_LPT_H(dev_priv) &&
14702 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14703 return false;
14704
14705 /* DDI E can't be used if DDI A requires 4 lanes */
14706 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14707 return false;
14708
14709 if (!dev_priv->vbt.int_crt_support)
14710 return false;
14711
14712 return true;
14713 }
14714
14715 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14716 {
14717 int pps_num;
14718 int pps_idx;
14719
14720 if (HAS_DDI(dev_priv))
14721 return;
14722 /*
14723 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14724 * everywhere where registers can be write protected.
14725 */
14726 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14727 pps_num = 2;
14728 else
14729 pps_num = 1;
14730
14731 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14732 u32 val = I915_READ(PP_CONTROL(pps_idx));
14733
14734 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14735 I915_WRITE(PP_CONTROL(pps_idx), val);
14736 }
14737 }
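/*
 * PANEL_UNLOCK_REGS is the 0xabcd magic in the upper 16 bits of
 * PP_CONTROL; writing it re-enables writes to the otherwise
 * write-protected panel power sequencer registers. VLV/CHV have two
 * sequencer instances, hence pps_num = 2 above.
 */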
14738
14739 static void intel_pps_init(struct drm_i915_private *dev_priv)
14740 {
14741 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14742 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14743 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14744 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14745 else
14746 dev_priv->pps_mmio_base = PPS_BASE;
14747
14748 intel_pps_unlock_regs_wa(dev_priv);
14749 }
14750
14751 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14752 {
14753 struct intel_encoder *encoder;
14754 bool dpd_is_edp = false;
14755
14756 intel_pps_init(dev_priv);
14757
14758 if (!HAS_DISPLAY(dev_priv))
14759 return;
14760
14761 if (IS_ELKHARTLAKE(dev_priv)) {
14762 intel_ddi_init(dev_priv, PORT_A);
14763 intel_ddi_init(dev_priv, PORT_B);
14764 intel_ddi_init(dev_priv, PORT_C);
14765 icl_dsi_init(dev_priv);
14766 } else if (INTEL_GEN(dev_priv) >= 11) {
14767 intel_ddi_init(dev_priv, PORT_A);
14768 intel_ddi_init(dev_priv, PORT_B);
14769 intel_ddi_init(dev_priv, PORT_C);
14770 intel_ddi_init(dev_priv, PORT_D);
14771 intel_ddi_init(dev_priv, PORT_E);
14772 /*
14773 * On some ICL SKUs port F is not present. No strap bits for
14774 * this, so rely on the VBT. Also work around broken VBTs on
14775 * SKUs known to have no port F.
14776 */
14777 if (IS_ICL_WITH_PORT_F(dev_priv) &&
14778 intel_bios_is_port_present(dev_priv, PORT_F))
14779 intel_ddi_init(dev_priv, PORT_F);
14780
14781 icl_dsi_init(dev_priv);
14782 } else if (IS_GEN9_LP(dev_priv)) {
14783 /*
14784 * FIXME: Broxton doesn't support port detection via the
14785 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14786 * detect the ports.
14787 */
14788 intel_ddi_init(dev_priv, PORT_A);
14789 intel_ddi_init(dev_priv, PORT_B);
14790 intel_ddi_init(dev_priv, PORT_C);
14791
14792 vlv_dsi_init(dev_priv);
14793 } else if (HAS_DDI(dev_priv)) {
14794 int found;
14795
14796 if (intel_ddi_crt_present(dev_priv))
14797 intel_crt_init(dev_priv);
14798
14799 /*
14800 * Haswell uses DDI functions to detect digital outputs.
14801 * On SKL pre-D0 the strap isn't connected, so we assume
14802 * the output is present.
14803 */
14804 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14805 /* WaIgnoreDDIAStrap: skl */
14806 if (found || IS_GEN9_BC(dev_priv))
14807 intel_ddi_init(dev_priv, PORT_A);
14808
14809 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
14810 * register. */
14811 found = I915_READ(SFUSE_STRAP);
14812
14813 if (found & SFUSE_STRAP_DDIB_DETECTED)
14814 intel_ddi_init(dev_priv, PORT_B);
14815 if (found & SFUSE_STRAP_DDIC_DETECTED)
14816 intel_ddi_init(dev_priv, PORT_C);
14817 if (found & SFUSE_STRAP_DDID_DETECTED)
14818 intel_ddi_init(dev_priv, PORT_D);
14819 if (found & SFUSE_STRAP_DDIF_DETECTED)
14820 intel_ddi_init(dev_priv, PORT_F);
14821 /*
14822 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14823 */
14824 if (IS_GEN9_BC(dev_priv) &&
14825 intel_bios_is_port_present(dev_priv, PORT_E))
14826 intel_ddi_init(dev_priv, PORT_E);
14827
14828 } else if (HAS_PCH_SPLIT(dev_priv)) {
14829 int found;
14830
14831 /*
14832 * intel_edp_init_connector() depends on LVDS init completing
14833 * first, to prevent registering both eDP and LVDS and thus
14834 * incorrectly sharing the PPS.
14835 */
14836 intel_lvds_init(dev_priv);
14837 intel_crt_init(dev_priv);
14838
14839 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
14840
14841 if (ilk_has_edp_a(dev_priv))
14842 intel_dp_init(dev_priv, DP_A, PORT_A);
14843
14844 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14845 /* PCH SDVOB multiplex with HDMIB */
14846 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
14847 if (!found)
14848 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
14849 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14850 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
14851 }
14852
14853 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14854 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
14855
14856 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14857 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
14858
14859 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14860 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
14861
14862 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14863 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
14864 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14865 bool has_edp, has_port;
14866
14867 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
14868 intel_crt_init(dev_priv);
14869
14870 /*
14871 * The DP_DETECTED bit is the latched state of the DDC
14872 * SDA pin at boot. However since eDP doesn't require DDC
14873 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14874 * eDP ports may have been muxed to an alternate function.
14875 * Thus we can't rely on the DP_DETECTED bit alone to detect
14876 * eDP ports. Consult the VBT as well as DP_DETECTED to
14877 * detect eDP ports.
14878 *
14879 * Sadly the straps seem to be missing sometimes even for HDMI
14880 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14881 * and VBT for the presence of the port. Additionally we can't
14882 * trust the port type the VBT declares as we've seen at least
14883 * HDMI ports that the VBT claims are DP or eDP.
14884 */
14885 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
14886 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14887 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14888 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
14889 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14890 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
14891
14892 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
14893 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14894 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14895 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
14896 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14897 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
14898
14899 if (IS_CHERRYVIEW(dev_priv)) {
14900 /*
14901 * eDP not supported on port D,
14902 * so no need to worry about it
14903 */
14904 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14905 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14906 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
14907 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14908 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
14909 }
14910
14911 vlv_dsi_init(dev_priv);
14912 } else if (IS_PINEVIEW(dev_priv)) {
14913 intel_lvds_init(dev_priv);
14914 intel_crt_init(dev_priv);
14915 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
14916 bool found = false;
14917
14918 if (IS_MOBILE(dev_priv))
14919 intel_lvds_init(dev_priv);
14920
14921 intel_crt_init(dev_priv);
14922
14923 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14924 DRM_DEBUG_KMS("probing SDVOB\n");
14925 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
14926 if (!found && IS_G4X(dev_priv)) {
14927 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14928 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
14929 }
14930
14931 if (!found && IS_G4X(dev_priv))
14932 intel_dp_init(dev_priv, DP_B, PORT_B);
14933 }
14934
14935 /* Before G4X, SDVOC doesn't have its own detect register */
14936
14937 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14938 DRM_DEBUG_KMS("probing SDVOC\n");
14939 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
14940 }
14941
14942 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14944 if (IS_G4X(dev_priv)) {
14945 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14946 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
14947 }
14948 if (IS_G4X(dev_priv))
14949 intel_dp_init(dev_priv, DP_C, PORT_C);
14950 }
14951
14952 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
14953 intel_dp_init(dev_priv, DP_D, PORT_D);
14954
14955 if (SUPPORTS_TV(dev_priv))
14956 intel_tv_init(dev_priv);
14957 } else if (IS_GEN(dev_priv, 2)) {
14958 if (IS_I85X(dev_priv))
14959 intel_lvds_init(dev_priv);
14960
14961 intel_crt_init(dev_priv);
14962 intel_dvo_init(dev_priv);
14963 }
14964
14965 intel_psr_init(dev_priv);
14966
14967 for_each_intel_encoder(&dev_priv->drm, encoder) {
14968 encoder->base.possible_crtcs = encoder->crtc_mask;
14969 encoder->base.possible_clones =
14970 intel_encoder_clones(encoder);
14971 }
14972
14973 intel_init_pch_refclk(dev_priv);
14974
14975 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
14976 }
14977
14978 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14979 {
14980 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14981 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14982
14983 drm_framebuffer_cleanup(fb);
14984
14985 i915_gem_object_lock(obj);
14986 WARN_ON(!obj->framebuffer_references--);
14987 i915_gem_object_unlock(obj);
14988
14989 i915_gem_object_put(obj);
14990
14991 kfree(intel_fb);
14992 }
14993
14994 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14995 struct drm_file *file,
14996 unsigned int *handle)
14997 {
14998 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14999
15000 if (obj->userptr.mm) {
15001 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15002 return -EINVAL;
15003 }
15004
15005 return drm_gem_handle_create(file, &obj->base, handle);
15006 }
15007
15008 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15009 struct drm_file *file,
15010 unsigned flags, unsigned color,
15011 struct drm_clip_rect *clips,
15012 unsigned num_clips)
15013 {
15014 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15015
15016 i915_gem_object_flush_if_display(obj);
15017 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15018
15019 return 0;
15020 }
15021
15022 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15023 .destroy = intel_user_framebuffer_destroy,
15024 .create_handle = intel_user_framebuffer_create_handle,
15025 .dirty = intel_user_framebuffer_dirty,
15026 };
15027
15028 static
15029 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
15030 u32 pixel_format, u64 fb_modifier)
15031 {
15032 struct intel_crtc *crtc;
15033 struct intel_plane *plane;
15034
15035 /*
15036 * We assume the primary plane for pipe A has
15037 * the highest stride limits of them all.
15038 */
15039 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
15040 plane = to_intel_plane(crtc->base.primary);
15041
15042 return plane->max_stride(plane, pixel_format, fb_modifier,
15043 DRM_MODE_ROTATE_0);
15044 }
15045
15046 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15047 struct drm_i915_gem_object *obj,
15048 struct drm_mode_fb_cmd2 *mode_cmd)
15049 {
15050 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15051 struct drm_framebuffer *fb = &intel_fb->base;
15052 u32 pitch_limit;
15053 unsigned int tiling, stride;
15054 int ret = -EINVAL;
15055 int i;
15056
15057 i915_gem_object_lock(obj);
15058 obj->framebuffer_references++;
15059 tiling = i915_gem_object_get_tiling(obj);
15060 stride = i915_gem_object_get_stride(obj);
15061 i915_gem_object_unlock(obj);
15062
15063 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15064 /*
15065 * If there's a fence, enforce that
15066 * the fb modifier and tiling mode match.
15067 */
15068 if (tiling != I915_TILING_NONE &&
15069 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15070 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15071 goto err;
15072 }
15073 } else {
15074 if (tiling == I915_TILING_X) {
15075 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15076 } else if (tiling == I915_TILING_Y) {
15077 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15078 goto err;
15079 }
15080 }
15081
15082 if (!drm_any_plane_has_format(&dev_priv->drm,
15083 mode_cmd->pixel_format,
15084 mode_cmd->modifier[0])) {
15085 struct drm_format_name_buf format_name;
15086
15087 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15088 drm_get_format_name(mode_cmd->pixel_format,
15089 &format_name),
15090 mode_cmd->modifier[0]);
15091 goto err;
15092 }
15093
15094 /*
15095 * gen2/3 display engine uses the fence if present,
15096 * so the tiling mode must match the fb modifier exactly.
15097 */
15098 if (INTEL_GEN(dev_priv) < 4 &&
15099 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15100 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15101 goto err;
15102 }
15103
15104 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
15105 mode_cmd->modifier[0]);
15106 if (mode_cmd->pitches[0] > pitch_limit) {
15107 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15108 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15109 "tiled" : "linear",
15110 mode_cmd->pitches[0], pitch_limit);
15111 goto err;
15112 }
15113
15114 /*
15115 * If there's a fence, enforce that
15116 * the fb pitch and fence stride match.
15117 */
15118 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15119 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15120 mode_cmd->pitches[0], stride);
15121 goto err;
15122 }
15123
15124 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15125 if (mode_cmd->offsets[0] != 0)
15126 goto err;
15127
15128 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15129
15130 for (i = 0; i < fb->format->num_planes; i++) {
15131 u32 stride_alignment;
15132
15133 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15134 DRM_DEBUG_KMS("bad plane %d handle\n", i);
15135 goto err;
15136 }
15137
15138 stride_alignment = intel_fb_stride_alignment(fb, i);
15139
15140 /*
15141 * Display WA #0531: skl,bxt,kbl,glk
15142 *
15143 * Render decompression and plane width > 3840
15144 * combined with horizontal panning requires the
15145 * plane stride to be a multiple of 4. We'll just
15146 * require the entire fb to accommodate that to avoid
15147 * potential runtime errors at plane configuration time.
15148 */
15149 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15150 is_ccs_modifier(fb->modifier))
15151 stride_alignment *= 4;
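/*
 * For example (assuming the usual 128 byte Y-tile stride alignment
 * for CCS framebuffers): a >3840 pixel wide gen9 CCS fb would need
 * its pitch aligned to 128 * 4 = 512 bytes here.
 */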
15152
15153 if (fb->pitches[i] & (stride_alignment - 1)) {
15154 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15155 i, fb->pitches[i], stride_alignment);
15156 goto err;
15157 }
15158
15159 fb->obj[i] = &obj->base;
15160 }
15161
15162 ret = intel_fill_fb_info(dev_priv, fb);
15163 if (ret)
15164 goto err;
15165
15166 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15167 if (ret) {
15168 DRM_ERROR("framebuffer init failed %d\n", ret);
15169 goto err;
15170 }
15171
15172 return 0;
15173
15174 err:
15175 i915_gem_object_lock(obj);
15176 obj->framebuffer_references--;
15177 i915_gem_object_unlock(obj);
15178 return ret;
15179 }
15180
15181 static struct drm_framebuffer *
15182 intel_user_framebuffer_create(struct drm_device *dev,
15183 struct drm_file *filp,
15184 const struct drm_mode_fb_cmd2 *user_mode_cmd)
15185 {
15186 struct drm_framebuffer *fb;
15187 struct drm_i915_gem_object *obj;
15188 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15189
15190 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15191 if (!obj)
15192 return ERR_PTR(-ENOENT);
15193
15194 fb = intel_framebuffer_create(obj, &mode_cmd);
15195 if (IS_ERR(fb))
15196 i915_gem_object_put(obj);
15197
15198 return fb;
15199 }
15200
15201 static void intel_atomic_state_free(struct drm_atomic_state *state)
15202 {
15203 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15204
15205 drm_atomic_state_default_release(state);
15206
15207 i915_sw_fence_fini(&intel_state->commit_ready);
15208
15209 kfree(state);
15210 }
15211
15212 static enum drm_mode_status
15213 intel_mode_valid(struct drm_device *dev,
15214 const struct drm_display_mode *mode)
15215 {
15216 struct drm_i915_private *dev_priv = to_i915(dev);
15217 int hdisplay_max, htotal_max;
15218 int vdisplay_max, vtotal_max;
15219
15220 /*
15221 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15222 * of DBLSCAN modes to the output's mode list when they detect
15223 * the scaling mode property on the connector. And they don't
15224 * ask the kernel to validate those modes in any way until
15225 * modeset time at which point the client gets a protocol error.
15226 * So in order to not upset those clients we silently ignore the
15227 * DBLSCAN flag on such connectors. For other connectors we will
15228 * reject modes with the DBLSCAN flag in encoder->compute_config().
15229 * And we always reject DBLSCAN modes in connector->mode_valid()
15230 * as we never want such modes on the connector's mode list.
15231 */
15232
15233 if (mode->vscan > 1)
15234 return MODE_NO_VSCAN;
15235
15236 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15237 return MODE_H_ILLEGAL;
15238
15239 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15240 DRM_MODE_FLAG_NCSYNC |
15241 DRM_MODE_FLAG_PCSYNC))
15242 return MODE_HSYNC;
15243
15244 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15245 DRM_MODE_FLAG_PIXMUX |
15246 DRM_MODE_FLAG_CLKDIV2))
15247 return MODE_BAD;
15248
15249 if (INTEL_GEN(dev_priv) >= 9 ||
15250 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15251 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15252 vdisplay_max = 4096;
15253 htotal_max = 8192;
15254 vtotal_max = 8192;
15255 } else if (INTEL_GEN(dev_priv) >= 3) {
15256 hdisplay_max = 4096;
15257 vdisplay_max = 4096;
15258 htotal_max = 8192;
15259 vtotal_max = 8192;
15260 } else {
15261 hdisplay_max = 2048;
15262 vdisplay_max = 2048;
15263 htotal_max = 4096;
15264 vtotal_max = 4096;
15265 }
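/*
 * As an example, a 4096x2160 mode passes the checks below on gen3+
 * (hdisplay limit 4096 or 8192) but is rejected on gen2, where
 * hdisplay is capped at 2048.
 */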
15266
15267 if (mode->hdisplay > hdisplay_max ||
15268 mode->hsync_start > htotal_max ||
15269 mode->hsync_end > htotal_max ||
15270 mode->htotal > htotal_max)
15271 return MODE_H_ILLEGAL;
15272
15273 if (mode->vdisplay > vdisplay_max ||
15274 mode->vsync_start > vtotal_max ||
15275 mode->vsync_end > vtotal_max ||
15276 mode->vtotal > vtotal_max)
15277 return MODE_V_ILLEGAL;
15278
15279 return MODE_OK;
15280 }
15281
15282 static const struct drm_mode_config_funcs intel_mode_funcs = {
15283 .fb_create = intel_user_framebuffer_create,
15284 .get_format_info = intel_get_format_info,
15285 .output_poll_changed = intel_fbdev_output_poll_changed,
15286 .mode_valid = intel_mode_valid,
15287 .atomic_check = intel_atomic_check,
15288 .atomic_commit = intel_atomic_commit,
15289 .atomic_state_alloc = intel_atomic_state_alloc,
15290 .atomic_state_clear = intel_atomic_state_clear,
15291 .atomic_state_free = intel_atomic_state_free,
15292 };
15293
15294 /**
15295 * intel_init_display_hooks - initialize the display modesetting hooks
15296 * @dev_priv: device private
15297 */
15298 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15299 {
15300 intel_init_cdclk_hooks(dev_priv);
15301
15302 if (INTEL_GEN(dev_priv) >= 9) {
15303 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15304 dev_priv->display.get_initial_plane_config =
15305 skylake_get_initial_plane_config;
15306 dev_priv->display.crtc_compute_clock =
15307 haswell_crtc_compute_clock;
15308 dev_priv->display.crtc_enable = haswell_crtc_enable;
15309 dev_priv->display.crtc_disable = haswell_crtc_disable;
15310 } else if (HAS_DDI(dev_priv)) {
15311 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15312 dev_priv->display.get_initial_plane_config =
15313 i9xx_get_initial_plane_config;
15314 dev_priv->display.crtc_compute_clock =
15315 haswell_crtc_compute_clock;
15316 dev_priv->display.crtc_enable = haswell_crtc_enable;
15317 dev_priv->display.crtc_disable = haswell_crtc_disable;
15318 } else if (HAS_PCH_SPLIT(dev_priv)) {
15319 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15320 dev_priv->display.get_initial_plane_config =
15321 i9xx_get_initial_plane_config;
15322 dev_priv->display.crtc_compute_clock =
15323 ironlake_crtc_compute_clock;
15324 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15325 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15326 } else if (IS_CHERRYVIEW(dev_priv)) {
15327 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15328 dev_priv->display.get_initial_plane_config =
15329 i9xx_get_initial_plane_config;
15330 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15331 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15332 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15333 } else if (IS_VALLEYVIEW(dev_priv)) {
15334 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15335 dev_priv->display.get_initial_plane_config =
15336 i9xx_get_initial_plane_config;
15337 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15338 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15339 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15340 } else if (IS_G4X(dev_priv)) {
15341 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15342 dev_priv->display.get_initial_plane_config =
15343 i9xx_get_initial_plane_config;
15344 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15345 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15346 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15347 } else if (IS_PINEVIEW(dev_priv)) {
15348 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15349 dev_priv->display.get_initial_plane_config =
15350 i9xx_get_initial_plane_config;
15351 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15352 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15353 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15354 } else if (!IS_GEN(dev_priv, 2)) {
15355 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15356 dev_priv->display.get_initial_plane_config =
15357 i9xx_get_initial_plane_config;
15358 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15359 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15360 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15361 } else {
15362 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15363 dev_priv->display.get_initial_plane_config =
15364 i9xx_get_initial_plane_config;
15365 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15366 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15367 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15368 }
15369
15370 if (IS_GEN(dev_priv, 5)) {
15371 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15372 } else if (IS_GEN(dev_priv, 6)) {
15373 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15374 } else if (IS_IVYBRIDGE(dev_priv)) {
15375 /* FIXME: detect B0+ stepping and use auto training */
15376 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15377 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15378 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15379 }
15380
15381 if (INTEL_GEN(dev_priv) >= 9)
15382 dev_priv->display.update_crtcs = skl_update_crtcs;
15383 else
15384 dev_priv->display.update_crtcs = intel_update_crtcs;
15385 }
15386
15387 /* Disable the VGA plane that we never use */
15388 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15389 {
15390 struct pci_dev *pdev = dev_priv->drm.pdev;
15391 u8 sr1;
15392 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15393
15394 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15395 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
15396 outb(SR01, VGA_SR_INDEX);
15397 sr1 = inb(VGA_SR_DATA);
15398 outb(sr1 | 1<<5, VGA_SR_DATA);
15399 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15400 udelay(300);
15401
15402 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15403 POSTING_READ(vga_reg);
15404 }
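/*
 * SR01 is the legacy VGA sequencer "clocking mode" register; setting
 * bit 5 turns the VGA screen off, and the udelay() above gives that
 * screen-off time to take effect before the VGA display plane itself
 * is disabled via the VGACNTRL write.
 */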
15405
15406 void intel_modeset_init_hw(struct drm_device *dev)
15407 {
15408 struct drm_i915_private *dev_priv = to_i915(dev);
15409
15410 intel_update_cdclk(dev_priv);
15411 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15412 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15413 }
15414
15415 /*
15416 * Calculate what we think the watermarks should be for the state we've read
15417 * out of the hardware and then immediately program those watermarks so that
15418 * we ensure the hardware settings match our internal state.
15419 *
15420 * We can calculate what we think WMs should be by creating a duplicate of the
15421 * current state (which was constructed during hardware readout) and running it
15422 * through the atomic check code to calculate new watermark values in the
15423 * state object.
15424 */
15425 static void sanitize_watermarks(struct drm_device *dev)
15426 {
15427 struct drm_i915_private *dev_priv = to_i915(dev);
15428 struct drm_atomic_state *state;
15429 struct intel_atomic_state *intel_state;
15430 struct drm_crtc *crtc;
15431 struct drm_crtc_state *cstate;
15432 struct drm_modeset_acquire_ctx ctx;
15433 int ret;
15434 int i;
15435
15436 /* Only supported on platforms that use atomic watermark design */
15437 if (!dev_priv->display.optimize_watermarks)
15438 return;
15439
15440 /*
15441 * We need to hold connection_mutex before calling duplicate_state so
15442 * that the connector loop is protected.
15443 */
15444 drm_modeset_acquire_init(&ctx, 0);
15445 retry:
15446 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15447 if (ret == -EDEADLK) {
15448 drm_modeset_backoff(&ctx);
15449 goto retry;
15450 } else if (WARN_ON(ret)) {
15451 goto fail;
15452 }
15453
15454 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15455 if (WARN_ON(IS_ERR(state)))
15456 goto fail;
15457
15458 intel_state = to_intel_atomic_state(state);
15459
15460 /*
15461 * Hardware readout is the only time we don't want to calculate
15462 * intermediate watermarks (since we don't trust the current
15463 * watermarks).
15464 */
15465 if (!HAS_GMCH(dev_priv))
15466 intel_state->skip_intermediate_wm = true;
15467
15468 ret = intel_atomic_check(dev, state);
15469 if (ret) {
15470 /*
15471 * If we fail here, it means that the hardware appears to be
15472 * programmed in a way that shouldn't be possible, given our
15473 * understanding of watermark requirements. This might mean a
15474 * mistake in the hardware readout code or a mistake in the
15475 * watermark calculations for a given platform. Raise a WARN
15476 * so that this is noticeable.
15477 *
15478 * If this actually happens, we'll have to just leave the
15479 * BIOS-programmed watermarks untouched and hope for the best.
15480 */
15481 WARN(true, "Could not determine valid watermarks for inherited state\n");
15482 goto put_state;
15483 }
15484
15485 /* Write calculated watermark values back */
15486 for_each_new_crtc_in_state(state, crtc, cstate, i) {
15487 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15488
15489 cs->wm.need_postvbl_update = true;
15490 dev_priv->display.optimize_watermarks(intel_state, cs);
15491
15492 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15493 }
15494
15495 put_state:
15496 drm_atomic_state_put(state);
15497 fail:
15498 drm_modeset_drop_locks(&ctx);
15499 drm_modeset_acquire_fini(&ctx);
15500 }
15501
15502 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15503 {
15504 if (IS_GEN(dev_priv, 5)) {
15505 u32 fdi_pll_clk =
15506 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15507
15508 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15509 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15510 dev_priv->fdi_pll_freq = 270000;
15511 } else {
15512 return;
15513 }
15514
15515 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15516 }
15517
15518 static int intel_initial_commit(struct drm_device *dev)
15519 {
15520 struct drm_atomic_state *state = NULL;
15521 struct drm_modeset_acquire_ctx ctx;
15522 struct drm_crtc *crtc;
15523 struct drm_crtc_state *crtc_state;
15524 int ret = 0;
15525
15526 state = drm_atomic_state_alloc(dev);
15527 if (!state)
15528 return -ENOMEM;
15529
15530 drm_modeset_acquire_init(&ctx, 0);
15531
15532 retry:
15533 state->acquire_ctx = &ctx;
15534
15535 drm_for_each_crtc(crtc, dev) {
15536 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15537 if (IS_ERR(crtc_state)) {
15538 ret = PTR_ERR(crtc_state);
15539 goto out;
15540 }
15541
15542 if (crtc_state->active) {
15543 ret = drm_atomic_add_affected_planes(state, crtc);
15544 if (ret)
15545 goto out;
15546
15547 /*
15548 * FIXME hack to force a LUT update to avoid the
15549 * plane update forcing the pipe gamma on without
15550 * having a proper LUT loaded. Remove once we
15551 * have readout for pipe gamma enable.
15552 */
15553 crtc_state->color_mgmt_changed = true;
15554 }
15555 }
15556
15557 ret = drm_atomic_commit(state);
15558
15559 out:
15560 if (ret == -EDEADLK) {
15561 drm_atomic_state_clear(state);
15562 drm_modeset_backoff(&ctx);
15563 goto retry;
15564 }
15565
15566 drm_atomic_state_put(state);
15567
15568 drm_modeset_drop_locks(&ctx);
15569 drm_modeset_acquire_fini(&ctx);
15570
15571 return ret;
15572 }
15573
15574 int intel_modeset_init(struct drm_device *dev)
15575 {
15576 struct drm_i915_private *dev_priv = to_i915(dev);
15577 struct i915_ggtt *ggtt = &dev_priv->ggtt;
15578 enum pipe pipe;
15579 struct intel_crtc *crtc;
15580 int ret;
15581
15582 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15583
15584 drm_mode_config_init(dev);
15585
15586 dev->mode_config.min_width = 0;
15587 dev->mode_config.min_height = 0;
15588
15589 dev->mode_config.preferred_depth = 24;
15590 dev->mode_config.prefer_shadow = 1;
15591
15592 dev->mode_config.allow_fb_modifiers = true;
15593
15594 dev->mode_config.funcs = &intel_mode_funcs;
15595
15596 init_llist_head(&dev_priv->atomic_helper.free_list);
15597 INIT_WORK(&dev_priv->atomic_helper.free_work,
15598 intel_atomic_helper_free_state_worker);
15599
15600 intel_init_quirks(dev_priv);
15601
15602 intel_fbc_init(dev_priv);
15603
15604 intel_init_pm(dev_priv);
15605
15606 /*
15607 * There may be no VBT; and if the BIOS enabled SSC, we can
15608 * just keep using it to avoid unnecessary flicker. Whereas if the
15609 * BIOS isn't using it, don't assume it will work even if the VBT
15610 * indicates as much.
15611 */
15612 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15613 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15614 DREF_SSC1_ENABLE);
15615
15616 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15617 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15618 bios_lvds_use_ssc ? "en" : "dis",
15619 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15620 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15621 }
15622 }
15623
15624 /* maximum framebuffer dimensions */
15625 if (IS_GEN(dev_priv, 2)) {
15626 dev->mode_config.max_width = 2048;
15627 dev->mode_config.max_height = 2048;
15628 } else if (IS_GEN(dev_priv, 3)) {
15629 dev->mode_config.max_width = 4096;
15630 dev->mode_config.max_height = 4096;
15631 } else {
15632 dev->mode_config.max_width = 8192;
15633 dev->mode_config.max_height = 8192;
15634 }
15635
15636 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15637 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15638 dev->mode_config.cursor_height = 1023;
15639 } else if (IS_GEN(dev_priv, 2)) {
15640 dev->mode_config.cursor_width = 64;
15641 dev->mode_config.cursor_height = 64;
15642 } else {
15643 dev->mode_config.cursor_width = 256;
15644 dev->mode_config.cursor_height = 256;
15645 }
15646
15647 dev->mode_config.fb_base = ggtt->gmadr.start;
15648
15649 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15650 INTEL_INFO(dev_priv)->num_pipes,
15651 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15652
15653 for_each_pipe(dev_priv, pipe) {
15654 ret = intel_crtc_init(dev_priv, pipe);
15655 if (ret) {
15656 drm_mode_config_cleanup(dev);
15657 return ret;
15658 }
15659 }
15660
15661 intel_shared_dpll_init(dev);
15662 intel_update_fdi_pll_freq(dev_priv);
15663
15664 intel_update_czclk(dev_priv);
15665 intel_modeset_init_hw(dev);
15666
15667 intel_hdcp_component_init(dev_priv);
15668
15669 if (dev_priv->max_cdclk_freq == 0)
15670 intel_update_max_cdclk(dev_priv);
15671
15672 /* Just disable it once at startup */
15673 i915_disable_vga(dev_priv);
15674 intel_setup_outputs(dev_priv);
15675
15676 drm_modeset_lock_all(dev);
15677 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15678 drm_modeset_unlock_all(dev);
15679
15680 for_each_intel_crtc(dev, crtc) {
15681 struct intel_initial_plane_config plane_config = {};
15682
15683 if (!crtc->active)
15684 continue;
15685
15686 /*
15687 * Note that reserving the BIOS fb up front prevents us
15688 * from stuffing other stolen allocations like the ring
15689 * on top. This prevents some ugliness at boot time, and
15690 * can even allow for smooth boot transitions if the BIOS
15691 * fb is large enough for the active pipe configuration.
15692 */
15693 dev_priv->display.get_initial_plane_config(crtc,
15694 &plane_config);
15695
15696 /*
15697 * If the fb is shared between multiple heads, we'll
15698 * just get the first one.
15699 */
15700 intel_find_initial_plane_obj(crtc, &plane_config);
15701 }
15702
15703 /*
15704 * Make sure hardware watermarks really match the state we read out.
15705 * Note that we need to do this after reconstructing the BIOS fb's
15706 * since the watermark calculation done here will use pstate->fb.
15707 */
15708 if (!HAS_GMCH(dev_priv))
15709 sanitize_watermarks(dev);
15710
15711 /*
15712 * Force all active planes to recompute their states. So that on
15713 * mode_setcrtc after probe, all the intel_plane_state variables
15714 * are already calculated and there is no assert_plane warnings
15715 * during bootup.
15716 */
15717 ret = intel_initial_commit(dev);
15718 if (ret)
15719 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15720
15721 return 0;
15722 }
15723
15724 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15725 {
15726 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15727 /* 640x480@60Hz, ~25175 kHz */
15728 struct dpll clock = {
15729 .m1 = 18,
15730 .m2 = 7,
15731 .p1 = 13,
15732 .p2 = 4,
15733 .n = 2,
15734 };
15735 u32 dpll, fp;
15736 int i;
15737
15738 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
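/*
 * A worked check of the 25154 above, assuming the standard i9xx DPLL
 * formulas (m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2),
 * dot = vco / (p1 * p2)):
 *
 *   m   = 5 * (18 + 2) + (7 + 2)  = 109
 *   vco = 48000 * 109 / (2 + 2)   = 1308000 kHz
 *   dot = 1308000 / (13 * 4)      = 25154 kHz (rounded)
 *
 * i.e. ~25.175 MHz, the standard 640x480@60 pixel clock.
 */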
15739
15740 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
15741 pipe_name(pipe), clock.vco, clock.dot);
15742
15743 fp = i9xx_dpll_compute_fp(&clock);
15744 dpll = DPLL_DVO_2X_MODE |
15745 DPLL_VGA_MODE_DIS |
15746 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
15747 PLL_P2_DIVIDE_BY_4 |
15748 PLL_REF_INPUT_DREFCLK |
15749 DPLL_VCO_ENABLE;
15750
15751 I915_WRITE(FP0(pipe), fp);
15752 I915_WRITE(FP1(pipe), fp);
15753
15754 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
15755 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
15756 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
15757 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
15758 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
15759 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
15760 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
15761
15762 /*
15763 * Apparently we need to have VGA mode enabled prior to changing
15764 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
15765 * dividers, even though the register value does change.
15766 */
15767 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
15768 I915_WRITE(DPLL(pipe), dpll);
15769
15770 /* Wait for the clocks to stabilize. */
15771 POSTING_READ(DPLL(pipe));
15772 udelay(150);
15773
15774 /* The pixel multiplier can only be updated once the
15775 * DPLL is enabled and the clocks are stable.
15776 *
15777 * So write it again.
15778 */
15779 I915_WRITE(DPLL(pipe), dpll);
15780
15781 /* We do this three times for luck */
15782 	for (i = 0; i < 3; i++) {
15783 I915_WRITE(DPLL(pipe), dpll);
15784 POSTING_READ(DPLL(pipe));
15785 udelay(150); /* wait for warmup */
15786 }
15787
15788 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
15789 POSTING_READ(PIPECONF(pipe));
15790
15791 intel_wait_for_pipe_scanline_moving(crtc);
15792 }
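
/*
 * Note: i830_enable_pipe()/i830_disable_pipe() exist to force a pipe on
 * with a fixed 640x480 mode, bypassing the normal atomic modeset paths
 * entirely (e.g. for the i830 quirk that wants both pipes powered up).
 */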
15793
15794 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15795 {
15796 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15797
15798 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
15799 pipe_name(pipe));
15800
15801 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
15802 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
15803 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
15804 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
15805 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
15806
15807 I915_WRITE(PIPECONF(pipe), 0);
15808 POSTING_READ(PIPECONF(pipe));
15809
15810 intel_wait_for_pipe_scanline_stopped(crtc);
15811
15812 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
15813 POSTING_READ(DPLL(pipe));
15814 }
15815
15816 static void
15817 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15818 {
15819 struct intel_crtc *crtc;
15820
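	/*
	 * Only pre-gen4 primary planes carry a pipe select field that the
	 * BIOS could have pointed at the wrong pipe; later hardware has a
	 * fixed plane-to-pipe mapping, hence the early return below.
	 */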
15821 if (INTEL_GEN(dev_priv) >= 4)
15822 return;
15823
15824 for_each_intel_crtc(&dev_priv->drm, crtc) {
15825 struct intel_plane *plane =
15826 to_intel_plane(crtc->base.primary);
15827 struct intel_crtc *plane_crtc;
15828 enum pipe pipe;
15829
15830 if (!plane->get_hw_state(plane, &pipe))
15831 continue;
15832
15833 if (pipe == crtc->pipe)
15834 continue;
15835
15836 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15837 plane->base.base.id, plane->base.name);
15838
15839 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15840 intel_plane_disable_noatomic(plane_crtc, plane);
15841 }
15842 }
15843
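/* Return true if at least one encoder is currently attached to the crtc. */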
15844 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15845 {
15846 struct drm_device *dev = crtc->base.dev;
15847 struct intel_encoder *encoder;
15848
15849 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15850 return true;
15851
15852 return false;
15853 }
15854
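/* Return the first connector attached to the encoder, or NULL if none. */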
15855 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15856 {
15857 struct drm_device *dev = encoder->base.dev;
15858 struct intel_connector *connector;
15859
15860 for_each_connector_on_encoder(dev, &encoder->base, connector)
15861 return connector;
15862
15863 return NULL;
15864 }
15865
15866 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
15867 enum pipe pch_transcoder)
15868 {
15869 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15870 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15871 }
15872
15873 static void intel_sanitize_crtc(struct intel_crtc *crtc,
15874 struct drm_modeset_acquire_ctx *ctx)
15875 {
15876 struct drm_device *dev = crtc->base.dev;
15877 struct drm_i915_private *dev_priv = to_i915(dev);
15878 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
15879 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
15880
15881 /* Clear any frame start delays used for debugging left by the BIOS */
15882 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
15883 i915_reg_t reg = PIPECONF(cpu_transcoder);
15884
15885 I915_WRITE(reg,
15886 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15887 }
15888
15889 if (crtc_state->base.active) {
15890 struct intel_plane *plane;
15891
15892 /* Disable everything but the primary plane */
15893 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15894 const struct intel_plane_state *plane_state =
15895 to_intel_plane_state(plane->base.state);
15896
15897 if (plane_state->base.visible &&
15898 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
15899 intel_plane_disable_noatomic(crtc, plane);
15900 }
15901
15902 /*
15903 * Disable any background color set by the BIOS, but enable the
15904 * gamma and CSC to match how we program our planes.
15905 */
15906 if (INTEL_GEN(dev_priv) >= 9)
15907 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
15908 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
15909 SKL_BOTTOM_COLOR_CSC_ENABLE);
15910 }
15911
15912 /* Adjust the state of the output pipe according to whether we
15913 * have active connectors/encoders. */
15914 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
15915 intel_crtc_disable_noatomic(&crtc->base, ctx);
15916
15917 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
15918 /*
15919 * We start out with underrun reporting disabled to avoid races.
15920 * For correct bookkeeping mark this on active crtcs.
15921 *
15922 		 * Also on gmch platforms we don't have any hardware bits to
15923 		 * disable the underrun reporting, which means we need to start
15924 * out with underrun reporting disabled also on inactive pipes,
15925 * since otherwise we'll complain about the garbage we read when
15926 * e.g. coming up after runtime pm.
15927 *
15928 * No protection against concurrent access is required - at
15929 * worst a fifo underrun happens which also sets this to false.
15930 */
15931 crtc->cpu_fifo_underrun_disabled = true;
15932 /*
15933 		 * We track the PCH transcoder underrun reporting state
15934 * within the crtc. With crtc for pipe A housing the underrun
15935 * reporting state for PCH transcoder A, crtc for pipe B housing
15936 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
15937 * and marking underrun reporting as disabled for the non-existing
15938 * PCH transcoders B and C would prevent enabling the south
15939 * error interrupt (see cpt_can_enable_serr_int()).
15940 */
15941 		if (has_pch_transcoder(dev_priv, crtc->pipe))
15942 crtc->pch_fifo_underrun_disabled = true;
15943 }
15944 }
15945
15946 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15947 {
15948 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15949
15950 /*
15951 	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
15952 	 * the hardware when a high res display is plugged in. DPLL P
15953 * divider is zero, and the pipe timings are bonkers. We'll
15954 * try to disable everything in that case.
15955 *
15956 * FIXME would be nice to be able to sanitize this state
15957 * without several WARNs, but for now let's take the easy
15958 * road.
15959 */
15960 return IS_GEN(dev_priv, 6) &&
15961 crtc_state->base.active &&
15962 crtc_state->shared_dpll &&
15963 crtc_state->port_clock == 0;
15964 }
15965
15966 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15967 {
15968 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15969 struct intel_connector *connector;
15970 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
15971 struct intel_crtc_state *crtc_state = crtc ?
15972 to_intel_crtc_state(crtc->base.state) : NULL;
15973
15974 /* We need to check both for a crtc link (meaning that the
15975 * encoder is active and trying to read from a pipe) and the
15976 * pipe itself being active. */
15977 bool has_active_crtc = crtc_state &&
15978 crtc_state->base.active;
15979
15980 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
15981 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
15982 pipe_name(crtc->pipe));
15983 has_active_crtc = false;
15984 }
15985
15986 connector = intel_encoder_find_connector(encoder);
15987 if (connector && !has_active_crtc) {
15988 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15989 encoder->base.base.id,
15990 encoder->base.name);
15991
15992 /* Connector is active, but has no active pipe. This is
15993 * fallout from our resume register restoring. Disable
15994 * the encoder manually again. */
15995 if (crtc_state) {
15996 struct drm_encoder *best_encoder;
15997
15998 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15999 encoder->base.base.id,
16000 encoder->base.name);
16001
16002 /* avoid oopsing in case the hooks consult best_encoder */
16003 best_encoder = connector->base.state->best_encoder;
16004 connector->base.state->best_encoder = &encoder->base;
16005
16006 if (encoder->disable)
16007 encoder->disable(encoder, crtc_state,
16008 connector->base.state);
16009 if (encoder->post_disable)
16010 encoder->post_disable(encoder, crtc_state,
16011 connector->base.state);
16012
16013 connector->base.state->best_encoder = best_encoder;
16014 }
16015 encoder->base.crtc = NULL;
16016
16017 /* Inconsistent output/port/pipe state happens presumably due to
16018 		 * a bug in one of the get_hw_state functions, or someplace else
16019 * in our code, like the register restore mess on resume. Clamp
16020 * things to off as a safer default. */
16021
16022 connector->base.dpms = DRM_MODE_DPMS_OFF;
16023 connector->base.encoder = NULL;
16024 }
16025
16026 /* notify opregion of the sanitized encoder state */
16027 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
16028
16029 if (INTEL_GEN(dev_priv) >= 11)
16030 icl_sanitize_encoder_pll_mapping(encoder);
16031 }
16032
16033 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16034 {
16035 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16036
16037 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16038 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16039 i915_disable_vga(dev_priv);
16040 }
16041 }
16042
16043 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16044 {
16045 intel_wakeref_t wakeref;
16046
16047 /*
16048 	 * This function can be called either from intel_modeset_setup_hw_state or
16049 * at a very early point in our resume sequence, where the power well
16050 * structures are not yet restored. Since this function is at a very
16051 * paranoid "someone might have enabled VGA while we were not looking"
16052 * level, just check if the power well is enabled instead of trying to
16053 * follow the "don't touch the power well if we don't need it" policy
16054 * the rest of the driver uses.
16055 */
16056 wakeref = intel_display_power_get_if_enabled(dev_priv,
16057 POWER_DOMAIN_VGA);
16058 if (!wakeref)
16059 return;
16060
16061 i915_redisable_vga_power_on(dev_priv);
16062
16063 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16064 }
16065
16066 /* FIXME read out full plane state for all planes */
16067 static void readout_plane_state(struct drm_i915_private *dev_priv)
16068 {
16069 struct intel_plane *plane;
16070 struct intel_crtc *crtc;
16071
16072 for_each_intel_plane(&dev_priv->drm, plane) {
16073 struct intel_plane_state *plane_state =
16074 to_intel_plane_state(plane->base.state);
16075 struct intel_crtc_state *crtc_state;
16076 enum pipe pipe = PIPE_A;
16077 bool visible;
16078
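		/*
		 * get_hw_state() may leave @pipe untouched for a plane whose
		 * power well is off, so the PIPE_A default above guarantees
		 * we still attach the state to a valid crtc.
		 */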
16079 visible = plane->get_hw_state(plane, &pipe);
16080
16081 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16082 crtc_state = to_intel_crtc_state(crtc->base.state);
16083
16084 intel_set_plane_visible(crtc_state, plane_state, visible);
16085
16086 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16087 plane->base.base.id, plane->base.name,
16088 enableddisabled(visible), pipe_name(pipe));
16089 }
16090
16091 for_each_intel_crtc(&dev_priv->drm, crtc) {
16092 struct intel_crtc_state *crtc_state =
16093 to_intel_crtc_state(crtc->base.state);
16094
16095 fixup_active_planes(crtc_state);
16096 }
16097 }
16098
16099 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16100 {
16101 struct drm_i915_private *dev_priv = to_i915(dev);
16102 enum pipe pipe;
16103 struct intel_crtc *crtc;
16104 struct intel_encoder *encoder;
16105 struct intel_connector *connector;
16106 struct drm_connector_list_iter conn_iter;
16107 int i;
16108
16109 dev_priv->active_crtcs = 0;
16110
16111 for_each_intel_crtc(dev, crtc) {
16112 struct intel_crtc_state *crtc_state =
16113 to_intel_crtc_state(crtc->base.state);
16114
16115 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16116 memset(crtc_state, 0, sizeof(*crtc_state));
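		/*
		 * Wipe the old state; only the crtc back-pointer is restored,
		 * everything else is filled in from the hw readout below.
		 */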
16117 crtc_state->base.crtc = &crtc->base;
16118
16119 crtc_state->base.active = crtc_state->base.enable =
16120 dev_priv->display.get_pipe_config(crtc, crtc_state);
16121
16122 crtc->base.enabled = crtc_state->base.enable;
16123 crtc->active = crtc_state->base.active;
16124
16125 if (crtc_state->base.active)
16126 dev_priv->active_crtcs |= 1 << crtc->pipe;
16127
16128 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16129 crtc->base.base.id, crtc->base.name,
16130 enableddisabled(crtc_state->base.active));
16131 }
16132
16133 readout_plane_state(dev_priv);
16134
16135 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16136 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16137
16138 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16139 &pll->state.hw_state);
16140 pll->state.crtc_mask = 0;
16141 for_each_intel_crtc(dev, crtc) {
16142 struct intel_crtc_state *crtc_state =
16143 to_intel_crtc_state(crtc->base.state);
16144
16145 if (crtc_state->base.active &&
16146 crtc_state->shared_dpll == pll)
16147 pll->state.crtc_mask |= 1 << crtc->pipe;
16148 }
16149 pll->active_mask = pll->state.crtc_mask;
16150
16151 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16152 pll->info->name, pll->state.crtc_mask, pll->on);
16153 }
16154
16155 for_each_intel_encoder(dev, encoder) {
16156 		pipe = PIPE_A;
16157
16158 if (encoder->get_hw_state(encoder, &pipe)) {
16159 struct intel_crtc_state *crtc_state;
16160
16161 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16162 crtc_state = to_intel_crtc_state(crtc->base.state);
16163
16164 encoder->base.crtc = &crtc->base;
16165 encoder->get_config(encoder, crtc_state);
16166 } else {
16167 encoder->base.crtc = NULL;
16168 }
16169
16170 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16171 encoder->base.base.id, encoder->base.name,
16172 enableddisabled(encoder->base.crtc),
16173 pipe_name(pipe));
16174 }
16175
16176 drm_connector_list_iter_begin(dev, &conn_iter);
16177 for_each_intel_connector_iter(connector, &conn_iter) {
16178 if (connector->get_hw_state(connector)) {
16179 connector->base.dpms = DRM_MODE_DPMS_ON;
16180
16181 encoder = connector->encoder;
16182 connector->base.encoder = &encoder->base;
16183
16184 if (encoder->base.crtc &&
16185 encoder->base.crtc->state->active) {
16186 /*
16187 * This has to be done during hardware readout
16188 * because anything calling .crtc_disable may
16189 * rely on the connector_mask being accurate.
16190 */
16191 encoder->base.crtc->state->connector_mask |=
16192 drm_connector_mask(&connector->base);
16193 encoder->base.crtc->state->encoder_mask |=
16194 drm_encoder_mask(&encoder->base);
16195 }
16196
16197 } else {
16198 connector->base.dpms = DRM_MODE_DPMS_OFF;
16199 connector->base.encoder = NULL;
16200 }
16201 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16202 connector->base.base.id, connector->base.name,
16203 enableddisabled(connector->base.encoder));
16204 }
16205 drm_connector_list_iter_end(&conn_iter);
16206
16207 for_each_intel_crtc(dev, crtc) {
16208 struct intel_crtc_state *crtc_state =
16209 to_intel_crtc_state(crtc->base.state);
16210 int min_cdclk = 0;
16211
16212 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16213 if (crtc_state->base.active) {
16214 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16215 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16216 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16217 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16218 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16219
16220 /*
16221 * The initial mode needs to be set in order to keep
16222 * the atomic core happy. It wants a valid mode if the
16223 * crtc's enabled, so we do the above call.
16224 *
16225 * But we don't set all the derived state fully, hence
16226 * set a flag to indicate that a full recalculation is
16227 * needed on the next commit.
16228 */
16229 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16230
16231 intel_crtc_compute_pixel_rate(crtc_state);
16232
16233 if (dev_priv->display.modeset_calc_cdclk) {
16234 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16235 if (WARN_ON(min_cdclk < 0))
16236 min_cdclk = 0;
16237 }
16238
16239 drm_calc_timestamping_constants(&crtc->base,
16240 &crtc_state->base.adjusted_mode);
16241 update_scanline_offset(crtc_state);
16242 }
16243
16244 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16245 dev_priv->min_voltage_level[crtc->pipe] =
16246 crtc_state->min_voltage_level;
16247
16248 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16249 }
16250 }
16251
16252 static void
16253 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16254 {
16255 struct intel_encoder *encoder;
16256
16257 for_each_intel_encoder(&dev_priv->drm, encoder) {
16258 u64 get_domains;
16259 enum intel_display_power_domain domain;
16260 struct intel_crtc_state *crtc_state;
16261
16262 if (!encoder->get_power_domains)
16263 continue;
16264
16265 /*
16266 * MST-primary and inactive encoders don't have a crtc state
16267 		 * and neither of these requires any power domain references.
16268 */
16269 if (!encoder->base.crtc)
16270 continue;
16271
16272 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16273 get_domains = encoder->get_power_domains(encoder, crtc_state);
16274 for_each_power_domain(domain, get_domains)
16275 intel_display_power_get(dev_priv, domain);
16276 }
16277 }
16278
16279 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16280 {
16281 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16282 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16283 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16284 DARBF_GATING_DIS);
16285
16286 if (IS_HASWELL(dev_priv)) {
16287 /*
16288 * WaRsPkgCStateDisplayPMReq:hsw
16289 * System hang if this isn't done before disabling all planes!
16290 */
16291 I915_WRITE(CHICKEN_PAR1_1,
16292 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16293 }
16294 }
16295
16296 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16297 enum port port, i915_reg_t hdmi_reg)
16298 {
16299 u32 val = I915_READ(hdmi_reg);
16300
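	/* Leave the port alone if it's enabled or already selects pipe A. */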
16301 if (val & SDVO_ENABLE ||
16302 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16303 return;
16304
16305 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16306 port_name(port));
16307
16308 val &= ~SDVO_PIPE_SEL_MASK;
16309 val |= SDVO_PIPE_SEL(PIPE_A);
16310
16311 I915_WRITE(hdmi_reg, val);
16312 }
16313
16314 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16315 enum port port, i915_reg_t dp_reg)
16316 {
16317 u32 val = I915_READ(dp_reg);
16318
16319 if (val & DP_PORT_EN ||
16320 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16321 return;
16322
16323 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16324 port_name(port));
16325
16326 val &= ~DP_PIPE_SEL_MASK;
16327 val |= DP_PIPE_SEL(PIPE_A);
16328
16329 I915_WRITE(dp_reg, val);
16330 }
16331
16332 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16333 {
16334 /*
16335 * The BIOS may select transcoder B on some of the PCH
16336 	 * ports even if it doesn't enable the port. This would trip
16337 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16338 * Sanitize the transcoder select bits to prevent that. We
16339 * assume that the BIOS never actually enabled the port,
16340 * because if it did we'd actually have to toggle the port
16341 * on and back off to make the transcoder A select stick
16342 	 * (see intel_dp_link_down(), intel_disable_hdmi(),
16343 * intel_disable_sdvo()).
16344 */
16345 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16346 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16347 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16348
16349 /* PCH SDVOB multiplex with HDMIB */
16350 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16351 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16352 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16353 }
16354
16355 /* Scan out the current hw modeset state
16356 * and sanitize it to the current state.
16357 */
16358 static void
16359 intel_modeset_setup_hw_state(struct drm_device *dev,
16360 struct drm_modeset_acquire_ctx *ctx)
16361 {
16362 struct drm_i915_private *dev_priv = to_i915(dev);
16363 struct intel_crtc_state *crtc_state;
16364 struct intel_encoder *encoder;
16365 struct intel_crtc *crtc;
16366 intel_wakeref_t wakeref;
16367 int i;
16368
16369 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16370
16371 intel_early_display_was(dev_priv);
16372 intel_modeset_readout_hw_state(dev);
16373
16374 /* HW state is read out, now we need to sanitize this mess. */
16375 get_encoder_power_domains(dev_priv);
16376
16377 if (HAS_PCH_IBX(dev_priv))
16378 ibx_sanitize_pch_ports(dev_priv);
16379
16380 /*
16381 * intel_sanitize_plane_mapping() may need to do vblank
16382 * waits, so we need vblank interrupts restored beforehand.
16383 */
16384 for_each_intel_crtc(&dev_priv->drm, crtc) {
16385 crtc_state = to_intel_crtc_state(crtc->base.state);
16386
16387 drm_crtc_vblank_reset(&crtc->base);
16388
16389 if (crtc_state->base.active)
16390 intel_crtc_vblank_on(crtc_state);
16391 }
16392
16393 intel_sanitize_plane_mapping(dev_priv);
16394
16395 for_each_intel_encoder(dev, encoder)
16396 intel_sanitize_encoder(encoder);
16397
16398 for_each_intel_crtc(&dev_priv->drm, crtc) {
16399 crtc_state = to_intel_crtc_state(crtc->base.state);
16400 intel_sanitize_crtc(crtc, ctx);
16401 intel_dump_pipe_config(crtc, crtc_state,
16402 "[setup_hw_state]");
16403 }
16404
16405 intel_modeset_update_connector_atomic_state(dev);
16406
16407 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16408 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16409
16410 if (!pll->on || pll->active_mask)
16411 continue;
16412
16413 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16414 pll->info->name);
16415
16416 pll->info->funcs->disable(dev_priv, pll);
16417 pll->on = false;
16418 }
16419
16420 if (IS_G4X(dev_priv)) {
16421 g4x_wm_get_hw_state(dev_priv);
16422 g4x_wm_sanitize(dev_priv);
16423 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16424 vlv_wm_get_hw_state(dev_priv);
16425 vlv_wm_sanitize(dev_priv);
16426 } else if (INTEL_GEN(dev_priv) >= 9) {
16427 skl_wm_get_hw_state(dev_priv);
16428 } else if (HAS_PCH_SPLIT(dev_priv)) {
16429 ilk_wm_get_hw_state(dev_priv);
16430 }
16431
16432 for_each_intel_crtc(dev, crtc) {
16433 u64 put_domains;
16434
16435 crtc_state = to_intel_crtc_state(crtc->base.state);
16436 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16437 if (WARN_ON(put_domains))
16438 modeset_put_power_domains(dev_priv, put_domains);
16439 }
16440
16441 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
16442
16443 intel_fbc_init_pipe_state(dev_priv);
16444 }
16445
16446 void intel_display_resume(struct drm_device *dev)
16447 {
16448 struct drm_i915_private *dev_priv = to_i915(dev);
16449 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16450 struct drm_modeset_acquire_ctx ctx;
16451 int ret;
16452
16453 dev_priv->modeset_restore_state = NULL;
16454 if (state)
16455 state->acquire_ctx = &ctx;
16456
16457 drm_modeset_acquire_init(&ctx, 0);
16458
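	/*
	 * Usual deadlock-avoidance dance: keep retrying the full modeset
	 * lock acquisition, backing off whenever we hit -EDEADLK.
	 */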
16459 while (1) {
16460 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16461 if (ret != -EDEADLK)
16462 break;
16463
16464 drm_modeset_backoff(&ctx);
16465 }
16466
16467 if (!ret)
16468 ret = __intel_display_resume(dev, state, &ctx);
16469
16470 intel_enable_ipc(dev_priv);
16471 drm_modeset_drop_locks(&ctx);
16472 drm_modeset_acquire_fini(&ctx);
16473
16474 if (ret)
16475 DRM_ERROR("Restoring old state failed with %i\n", ret);
16476 if (state)
16477 drm_atomic_state_put(state);
16478 }
16479
16480 static void intel_hpd_poll_fini(struct drm_device *dev)
16481 {
16482 struct intel_connector *connector;
16483 struct drm_connector_list_iter conn_iter;
16484
16485 /* Kill all the work that may have been queued by hpd. */
16486 drm_connector_list_iter_begin(dev, &conn_iter);
16487 for_each_intel_connector_iter(connector, &conn_iter) {
16488 if (connector->modeset_retry_work.func)
16489 cancel_work_sync(&connector->modeset_retry_work);
16490 if (connector->hdcp.shim) {
16491 cancel_delayed_work_sync(&connector->hdcp.check_work);
16492 cancel_work_sync(&connector->hdcp.prop_work);
16493 }
16494 }
16495 drm_connector_list_iter_end(&conn_iter);
16496 }
16497
16498 void intel_modeset_cleanup(struct drm_device *dev)
16499 {
16500 struct drm_i915_private *dev_priv = to_i915(dev);
16501
16502 flush_workqueue(dev_priv->modeset_wq);
16503
16504 flush_work(&dev_priv->atomic_helper.free_work);
16505 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16506
16507 /*
16508 	 * Disable interrupts and polling as the first thing to avoid creating havoc.
16509 	 * Too much stuff here (turning off connectors, ...) would
16510 * experience fancy races otherwise.
16511 */
16512 intel_irq_uninstall(dev_priv);
16513
16514 /*
16515 * Due to the hpd irq storm handling the hotplug work can re-arm the
16516 * poll handlers. Hence disable polling after hpd handling is shut down.
16517 */
16518 intel_hpd_poll_fini(dev);
16519
16520 /* poll work can call into fbdev, hence clean that up afterwards */
16521 intel_fbdev_fini(dev_priv);
16522
16523 intel_unregister_dsm_handler();
16524
16525 intel_fbc_global_disable(dev_priv);
16526
16527 /* flush any delayed tasks or pending work */
16528 flush_scheduled_work();
16529
16530 intel_hdcp_component_fini(dev_priv);
16531
16532 drm_mode_config_cleanup(dev);
16533
16534 intel_overlay_cleanup(dev_priv);
16535
16536 intel_teardown_gmbus(dev_priv);
16537
16538 destroy_workqueue(dev_priv->modeset_wq);
16539
16540 intel_fbc_cleanup_cfb(dev_priv);
16541 }
16542
16543 /*
16544 * Set VGA decode state - true == enable VGA decode
16545 */
16546 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16547 {
16548 	unsigned int reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16549 u16 gmch_ctrl;
16550
16551 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16552 DRM_ERROR("failed to read control word\n");
16553 return -EIO;
16554 }
16555
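	/* Nothing to do if VGA decode is already in the requested state. */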
16556 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16557 return 0;
16558
16559 if (state)
16560 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16561 else
16562 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16563
16564 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16565 DRM_ERROR("failed to write control word\n");
16566 return -EIO;
16567 }
16568
16569 return 0;
16570 }
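
/*
 * Example (hypothetical caller, for illustration only): a VGA arbiter
 * decode callback could forward its request as
 *
 *	intel_modeset_vga_set_state(dev_priv,
 *				    (decode & VGA_RSRC_LEGACY_MEM) != 0);
 */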
16571
16572 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16573
16574 struct intel_display_error_state {
16575
16576 u32 power_well_driver;
16577
16578 struct intel_cursor_error_state {
16579 u32 control;
16580 u32 position;
16581 u32 base;
16582 u32 size;
16583 } cursor[I915_MAX_PIPES];
16584
16585 struct intel_pipe_error_state {
16586 bool power_domain_on;
16587 u32 source;
16588 u32 stat;
16589 } pipe[I915_MAX_PIPES];
16590
16591 struct intel_plane_error_state {
16592 u32 control;
16593 u32 stride;
16594 u32 size;
16595 u32 pos;
16596 u32 addr;
16597 u32 surface;
16598 u32 tile_offset;
16599 } plane[I915_MAX_PIPES];
16600
16601 struct intel_transcoder_error_state {
16602 bool available;
16603 bool power_domain_on;
16604 enum transcoder cpu_transcoder;
16605
16606 u32 conf;
16607
16608 u32 htotal;
16609 u32 hblank;
16610 u32 hsync;
16611 u32 vtotal;
16612 u32 vblank;
16613 u32 vsync;
16614 } transcoder[4];
16615 };
16616
16617 struct intel_display_error_state *
16618 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16619 {
16620 struct intel_display_error_state *error;
16621 int transcoders[] = {
16622 TRANSCODER_A,
16623 TRANSCODER_B,
16624 TRANSCODER_C,
16625 TRANSCODER_EDP,
16626 };
16627 int i;
16628
16629 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16630
16631 if (!HAS_DISPLAY(dev_priv))
16632 return NULL;
16633
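	/* May be called from the error capture path, hence GFP_ATOMIC. */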
16634 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16635 if (error == NULL)
16636 return NULL;
16637
16638 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16639 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16640
16641 for_each_pipe(dev_priv, i) {
16642 error->pipe[i].power_domain_on =
16643 __intel_display_power_is_enabled(dev_priv,
16644 POWER_DOMAIN_PIPE(i));
16645 if (!error->pipe[i].power_domain_on)
16646 continue;
16647
16648 error->cursor[i].control = I915_READ(CURCNTR(i));
16649 error->cursor[i].position = I915_READ(CURPOS(i));
16650 error->cursor[i].base = I915_READ(CURBASE(i));
16651
16652 error->plane[i].control = I915_READ(DSPCNTR(i));
16653 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16654 if (INTEL_GEN(dev_priv) <= 3) {
16655 error->plane[i].size = I915_READ(DSPSIZE(i));
16656 error->plane[i].pos = I915_READ(DSPPOS(i));
16657 }
16658 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16659 error->plane[i].addr = I915_READ(DSPADDR(i));
16660 if (INTEL_GEN(dev_priv) >= 4) {
16661 error->plane[i].surface = I915_READ(DSPSURF(i));
16662 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16663 }
16664
16665 error->pipe[i].source = I915_READ(PIPESRC(i));
16666
16667 if (HAS_GMCH(dev_priv))
16668 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16669 }
16670
16671 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16672 enum transcoder cpu_transcoder = transcoders[i];
16673
16674 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16675 continue;
16676
16677 error->transcoder[i].available = true;
16678 error->transcoder[i].power_domain_on =
16679 __intel_display_power_is_enabled(dev_priv,
16680 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16681 if (!error->transcoder[i].power_domain_on)
16682 continue;
16683
16684 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16685
16686 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16687 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16688 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16689 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16690 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16691 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16692 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16693 }
16694
16695 return error;
16696 }
16697
16698 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16699
16700 void
16701 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16702 struct intel_display_error_state *error)
16703 {
16704 struct drm_i915_private *dev_priv = m->i915;
16705 int i;
16706
16707 if (!error)
16708 return;
16709
16710 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16711 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16712 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16713 error->power_well_driver);
16714 for_each_pipe(dev_priv, i) {
16715 err_printf(m, "Pipe [%d]:\n", i);
16716 err_printf(m, " Power: %s\n",
16717 onoff(error->pipe[i].power_domain_on));
16718 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
16719 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
16720
16721 err_printf(m, "Plane [%d]:\n", i);
16722 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
16723 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
16724 if (INTEL_GEN(dev_priv) <= 3) {
16725 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
16726 err_printf(m, " POS: %08x\n", error->plane[i].pos);
16727 }
16728 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16729 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
16730 if (INTEL_GEN(dev_priv) >= 4) {
16731 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
16732 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
16733 }
16734
16735 err_printf(m, "Cursor [%d]:\n", i);
16736 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
16737 err_printf(m, " POS: %08x\n", error->cursor[i].position);
16738 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
16739 }
16740
16741 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16742 if (!error->transcoder[i].available)
16743 continue;
16744
16745 err_printf(m, "CPU transcoder: %s\n",
16746 transcoder_name(error->transcoder[i].cpu_transcoder));
16747 err_printf(m, " Power: %s\n",
16748 onoff(error->transcoder[i].power_domain_on));
16749 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
16750 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
16751 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
16752 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
16753 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
16754 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
16755 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
16756 }
16757 }
16758
16759 #endif