/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

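/*
 * PLL divider limits for a platform/output combination. dot is the pixel
 * clock range and vco the PLL output range (both in kHz); n/m/m1/m2/p/p1
 * are the legal divider ranges. p2 is picked from p2_slow or p2_fast
 * depending on whether the target dot clock is below or above dot_limit
 * (see i9xx_select_p2_div() below).
 */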
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
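
/*
 * The CCK rate is derived as rate = 2 * ref_freq / (divider + 1). For
 * example (illustrative numbers), ref_freq = 1600000 kHz and divider = 3
 * yield 2 * 1600000 / 4 = 800000 kHz.
 */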

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
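
/*
 * Worked example for the i9xx equation above (illustrative numbers):
 * refclk = 96000 kHz, m1 = 12, m2 = 7, n = 2, p1 = 2, p2 = 5 gives
 * m = 5 * (12 + 2) + (7 + 2) = 79, vco = 96000 * 79 / (2 + 2) = 1896000 kHz,
 * p = 10, and thus dot = 1896000 / 10 = 189600 kHz.
 */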

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
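
/*
 * On VLV/CHV the PLL produces the 5x "fast" clock for the port, so the
 * pipe pixel clock reported here is clock->dot / 5. The VLV/CHV limits
 * and the searches below are likewise expressed in fast clocks.
 */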

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
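
/*
 * On CHV, m2 carries a 22-bit fractional part (see the 24 << 22 ... 175 << 22
 * limits above), which is why the VCO computation divides by n << 22 to
 * cancel the fixed-point scaling.
 */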

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
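
/*
 * For example, with the i9xx SDVO limits above (dot_limit = 200000 kHz) a
 * 148500 kHz target selects p2_slow = 10, while a 270000 kHz target would
 * select p2_fast = 5.
 */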

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. The calculated error is
 * returned in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
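
/*
 * The error is expressed in parts per million of the target: for example
 * (illustrative numbers), a target of 270000 kHz and a computed dot clock
 * of 270027 kHz give 1000000 * 27 / 270000 = 100 ppm.
 */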

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1, m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 always to 2.
	 * If we ever need to support a 200 MHz refclk, this needs revisiting
	 * because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}
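
/*
 * The BXT helper reuses the CHV search: per the limits above the BXT PLL
 * has the same structure (n = 1, m1 = 2, fractional m2) and uses a fixed
 * 100 MHz reference clock.
 */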

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

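/*
 * Sample the scanline counter twice, 5 ms apart: if PIPEDSL changed
 * between the reads, the pipe is actively scanning out.
 */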
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB is multiplexed with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Re-enable the 10-bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable the pipe or pipe PLLs if they are still needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * PCH transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in the transcoder consistent with
		 * that in the pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}
1769
1770 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1771 {
1772 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1773
1774 /*
1775 * On i965gm the hardware frame counter reads
1776 * zero when the TV encoder is enabled :(
1777 */
1778 if (IS_I965GM(dev_priv) &&
1779 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1780 return 0;
1781
1782 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1783 return 0xffffffff; /* full 32 bit counter */
1784 else if (INTEL_GEN(dev_priv) >= 3)
1785 return 0xffffff; /* only 24 bits of frame count */
1786 else
1787 return 0; /* Gen2 doesn't have a hardware frame counter */
1788 }
1789
1790 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1791 {
1792 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1793
1794 drm_crtc_set_max_vblank_count(&crtc->base,
1795 intel_crtc_max_vblank_count(crtc_state));
1796 drm_crtc_vblank_on(&crtc->base);
1797 }
1798
1799 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1800 {
1801 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1802 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1803 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1804 enum pipe pipe = crtc->pipe;
1805 i915_reg_t reg;
1806 u32 val;
1807
1808 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1809
1810 assert_planes_disabled(crtc);
1811
1812 /*
1813 * A pipe without a PLL won't actually be able to drive bits from
1814 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1815 * need the check.
1816 */
1817 if (HAS_GMCH(dev_priv)) {
1818 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1819 assert_dsi_pll_enabled(dev_priv);
1820 else
1821 assert_pll_enabled(dev_priv, pipe);
1822 } else {
1823 if (new_crtc_state->has_pch_encoder) {
1824 /* if driving the PCH, we need FDI enabled */
1825 assert_fdi_rx_pll_enabled(dev_priv,
1826 intel_crtc_pch_transcoder(crtc));
1827 assert_fdi_tx_pll_enabled(dev_priv,
1828 (enum pipe) cpu_transcoder);
1829 }
1830 /* FIXME: assert CPU port conditions for SNB+ */
1831 }
1832
1833 reg = PIPECONF(cpu_transcoder);
1834 val = I915_READ(reg);
1835 if (val & PIPECONF_ENABLE) {
1836 /* we keep both pipes enabled on 830 */
1837 WARN_ON(!IS_I830(dev_priv));
1838 return;
1839 }
1840
1841 I915_WRITE(reg, val | PIPECONF_ENABLE);
1842 POSTING_READ(reg);
1843
1844 /*
1845 * Until the pipe starts, PIPEDSL reads will return a stale value,
1846 * which causes an apparent vblank timestamp jump when PIPEDSL
1847 * resets to its proper value. That also messes up the frame count
1848 * when it's derived from the timestamps. So let's wait for the
1849 * pipe to start properly before we call drm_crtc_vblank_on().
1850 */
1851 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1852 intel_wait_for_pipe_scanline_moving(crtc);
1853 }
1854
1855 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1856 {
1857 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1858 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1859 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1860 enum pipe pipe = crtc->pipe;
1861 i915_reg_t reg;
1862 u32 val;
1863
1864 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1865
1866 /*
1867 * Make sure planes won't keep trying to pump pixels to us,
1868 * or we might hang the display.
1869 */
1870 assert_planes_disabled(crtc);
1871
1872 reg = PIPECONF(cpu_transcoder);
1873 val = I915_READ(reg);
1874 if ((val & PIPECONF_ENABLE) == 0)
1875 return;
1876
1877 /*
1878 * Double wide has implications for planes
1879 * so best keep it disabled when not needed.
1880 */
1881 if (old_crtc_state->double_wide)
1882 val &= ~PIPECONF_DOUBLE_WIDE;
1883
1884 /* Don't disable the pipe or pipe PLLs if they are still needed (i830) */
1885 if (!IS_I830(dev_priv))
1886 val &= ~PIPECONF_ENABLE;
1887
1888 I915_WRITE(reg, val);
1889 if ((val & PIPECONF_ENABLE) == 0)
1890 intel_wait_for_pipe_off(old_crtc_state);
1891 }
1892
1893 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1894 {
1895 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1896 }
1897
1898 static unsigned int
1899 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1900 {
1901 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1902 unsigned int cpp = fb->format->cpp[color_plane];
1903
1904 switch (fb->modifier) {
1905 case DRM_FORMAT_MOD_LINEAR:
1906 return cpp;
1907 case I915_FORMAT_MOD_X_TILED:
1908 if (IS_GEN(dev_priv, 2))
1909 return 128;
1910 else
1911 return 512;
1912 case I915_FORMAT_MOD_Y_TILED_CCS:
1913 if (color_plane == 1)
1914 return 128;
1915 /* fall through */
1916 case I915_FORMAT_MOD_Y_TILED:
1917 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1918 return 128;
1919 else
1920 return 512;
1921 case I915_FORMAT_MOD_Yf_TILED_CCS:
1922 if (color_plane == 1)
1923 return 128;
1924 /* fall through */
1925 case I915_FORMAT_MOD_Yf_TILED:
1926 switch (cpp) {
1927 case 1:
1928 return 64;
1929 case 2:
1930 case 4:
1931 return 128;
1932 case 8:
1933 case 16:
1934 return 256;
1935 default:
1936 MISSING_CASE(cpp);
1937 return cpp;
1938 }
1939 break;
1940 default:
1941 MISSING_CASE(fb->modifier);
1942 return cpp;
1943 }
1944 }
1945
1946 static unsigned int
1947 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1948 {
1949 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1950 return 1;
1951 else
1952 return intel_tile_size(to_i915(fb->dev)) /
1953 intel_tile_width_bytes(fb, color_plane);
1954 }
1955
1956 /* Return the tile dimensions in pixel units */
1957 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1958 unsigned int *tile_width,
1959 unsigned int *tile_height)
1960 {
1961 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1962 unsigned int cpp = fb->format->cpp[color_plane];
1963
1964 *tile_width = tile_width_bytes / cpp;
1965 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1966 }
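/*
 * Worked example (a sketch assuming gen4+, i.e. a 4 KiB tile): an
 * X-tiled XRGB8888 fb has tile_width_bytes == 512 and cpp == 4, so
 * one tile spans 512 / 4 == 128 pixels by 4096 / 512 == 8 lines.
 */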
1967
1968 unsigned int
1969 intel_fb_align_height(const struct drm_framebuffer *fb,
1970 int color_plane, unsigned int height)
1971 {
1972 unsigned int tile_height = intel_tile_height(fb, color_plane);
1973
1974 return ALIGN(height, tile_height);
1975 }
1976
1977 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1978 {
1979 unsigned int size = 0;
1980 int i;
1981
1982 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1983 size += rot_info->plane[i].width * rot_info->plane[i].height;
1984
1985 return size;
1986 }
1987
1988 static void
1989 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1990 const struct drm_framebuffer *fb,
1991 unsigned int rotation)
1992 {
1993 view->type = I915_GGTT_VIEW_NORMAL;
1994 if (drm_rotation_90_or_270(rotation)) {
1995 view->type = I915_GGTT_VIEW_ROTATED;
1996 view->rotated = to_intel_framebuffer(fb)->rot_info;
1997 }
1998 }
1999
2000 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2001 {
2002 if (IS_I830(dev_priv))
2003 return 16 * 1024;
2004 else if (IS_I85X(dev_priv))
2005 return 256;
2006 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2007 return 32;
2008 else
2009 return 4 * 1024;
2010 }
2011
2012 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2013 {
2014 if (INTEL_GEN(dev_priv) >= 9)
2015 return 256 * 1024;
2016 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2017 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2018 return 128 * 1024;
2019 else if (INTEL_GEN(dev_priv) >= 4)
2020 return 4 * 1024;
2021 else
2022 return 0;
2023 }
2024
2025 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2026 int color_plane)
2027 {
2028 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2029
2030 /* AUX_DIST needs only 4K alignment */
2031 if (color_plane == 1)
2032 return 4096;
2033
2034 switch (fb->modifier) {
2035 case DRM_FORMAT_MOD_LINEAR:
2036 return intel_linear_alignment(dev_priv);
2037 case I915_FORMAT_MOD_X_TILED:
2038 if (INTEL_GEN(dev_priv) >= 9)
2039 return 256 * 1024;
2040 return 0;
2041 case I915_FORMAT_MOD_Y_TILED_CCS:
2042 case I915_FORMAT_MOD_Yf_TILED_CCS:
2043 case I915_FORMAT_MOD_Y_TILED:
2044 case I915_FORMAT_MOD_Yf_TILED:
2045 return 1 * 1024 * 1024;
2046 default:
2047 MISSING_CASE(fb->modifier);
2048 return 0;
2049 }
2050 }
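/*
 * For example, per the cases above: a linear fb on VLV/CHV needs
 * 128 KiB alignment, an X-tiled fb on gen9+ needs 256 KiB, any
 * Y/Yf-tiled fb needs 1 MiB, and a CCS AUX plane (color_plane == 1)
 * only ever needs 4 KiB.
 */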
2051
2052 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2053 {
2054 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2055 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2056
2057 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2058 }
2059
2060 struct i915_vma *
2061 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2062 const struct i915_ggtt_view *view,
2063 bool uses_fence,
2064 unsigned long *out_flags)
2065 {
2066 struct drm_device *dev = fb->dev;
2067 struct drm_i915_private *dev_priv = to_i915(dev);
2068 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2069 intel_wakeref_t wakeref;
2070 struct i915_vma *vma;
2071 unsigned int pinctl;
2072 u32 alignment;
2073
2074 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2075
2076 alignment = intel_surf_alignment(fb, 0);
2077
2078 /* Note that the w/a also requires 64 PTE of padding following the
2079 * bo. We currently fill all unused PTE with the shadow page and so
2080 * we should always have valid PTE following the scanout, preventing
2081 * the VT-d warning.
2082 */
2083 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2084 alignment = 256 * 1024;
2085
2086 /*
2087 * Global gtt pte registers are special registers which actually forward
2088 * writes to a chunk of system memory, which means there is no risk
2089 * that the register values disappear as soon as we call
2090 * intel_runtime_pm_put(), so it is correct to wrap only the
2091 * pin/unpin/fence and not more.
2092 */
2093 wakeref = intel_runtime_pm_get(dev_priv);
2094
2095 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2096
2097 pinctl = 0;
2098
2099 /* Valleyview is definitely limited to scanning out the first
2100 * 512MiB. Let's presume this behaviour was inherited from the
2101 * g4x display engine and that all earlier gen are similarly
2102 * limited. Testing suggests that it is a little more
2103 * complicated than this. For example, Cherryview appears quite
2104 * happy to scanout from anywhere within its global aperture.
2105 */
2106 if (HAS_GMCH(dev_priv))
2107 pinctl |= PIN_MAPPABLE;
2108
2109 vma = i915_gem_object_pin_to_display_plane(obj,
2110 alignment, view, pinctl);
2111 if (IS_ERR(vma))
2112 goto err;
2113
2114 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2115 int ret;
2116
2117 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2118 * fence, whereas 965+ only requires a fence if using
2119 * framebuffer compression. For simplicity, we always, when
2120 * possible, install a fence as the cost is not that onerous.
2121 *
2122 * If we fail to fence the tiled scanout, then either the
2123 * modeset will reject the change (which is highly unlikely as
2124 * the affected systems, all but one, do not have unmappable
2125 * space) or we will not be able to enable full powersaving
2126 * techniques (also likely not to apply due to various limits
2127 * FBC and the like impose on the size of the buffer, which
2128 * presumably we violated anyway with this unmappable buffer).
2129 * Anyway, it is presumably better to stumble onwards with
2130 * something and try to run the system in a "less than optimal"
2131 * mode that matches the user configuration.
2132 */
2133 ret = i915_vma_pin_fence(vma);
2134 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2135 i915_gem_object_unpin_from_display_plane(vma);
2136 vma = ERR_PTR(ret);
2137 goto err;
2138 }
2139
2140 if (ret == 0 && vma->fence)
2141 *out_flags |= PLANE_HAS_FENCE;
2142 }
2143
2144 i915_vma_get(vma);
2145 err:
2146 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2147
2148 intel_runtime_pm_put(dev_priv, wakeref);
2149 return vma;
2150 }
2151
2152 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2153 {
2154 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2155
2156 if (flags & PLANE_HAS_FENCE)
2157 i915_vma_unpin_fence(vma);
2158 i915_gem_object_unpin_from_display_plane(vma);
2159 i915_vma_put(vma);
2160 }
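/*
 * A minimal usage sketch (illustrative only): a vma obtained from
 * intel_pin_and_fence_fb_obj() above (with struct_mutex held) is
 * released by handing it back here along with the flags that call
 * filled in:
 *
 *	vma = intel_pin_and_fence_fb_obj(fb, &view, true, &flags);
 *	...
 *	intel_unpin_fb_vma(vma, flags);
 */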
2161
2162 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2163 unsigned int rotation)
2164 {
2165 if (drm_rotation_90_or_270(rotation))
2166 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2167 else
2168 return fb->pitches[color_plane];
2169 }
2170
2171 /*
2172 * Convert the x/y offsets into a linear offset.
2173 * Only valid with 0/180 degree rotation, which is fine since linear
2174 * offset is only used with linear buffers on pre-hsw and tiled buffers
2175 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2176 */
2177 u32 intel_fb_xy_to_linear(int x, int y,
2178 const struct intel_plane_state *state,
2179 int color_plane)
2180 {
2181 const struct drm_framebuffer *fb = state->base.fb;
2182 unsigned int cpp = fb->format->cpp[color_plane];
2183 unsigned int pitch = state->color_plane[color_plane].stride;
2184
2185 return y * pitch + x * cpp;
2186 }
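/*
 * E.g. with an 8192 byte stride and cpp == 4, (x, y) == (16, 2)
 * maps to 2 * 8192 + 16 * 4 == 16448 bytes from the surface start.
 */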
2187
2188 /*
2189 * Add the x/y offsets derived from fb->offsets[] to the user
2190 * specified plane src x/y offsets. The resulting x/y offsets
2191 * specify the start of scanout from the beginning of the gtt mapping.
2192 */
2193 void intel_add_fb_offsets(int *x, int *y,
2194 const struct intel_plane_state *state,
2195 int color_plane)
2196 {
2198 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2199 unsigned int rotation = state->base.rotation;
2200
2201 if (drm_rotation_90_or_270(rotation)) {
2202 *x += intel_fb->rotated[color_plane].x;
2203 *y += intel_fb->rotated[color_plane].y;
2204 } else {
2205 *x += intel_fb->normal[color_plane].x;
2206 *y += intel_fb->normal[color_plane].y;
2207 }
2208 }
2209
2210 static u32 intel_adjust_tile_offset(int *x, int *y,
2211 unsigned int tile_width,
2212 unsigned int tile_height,
2213 unsigned int tile_size,
2214 unsigned int pitch_tiles,
2215 u32 old_offset,
2216 u32 new_offset)
2217 {
2218 unsigned int pitch_pixels = pitch_tiles * tile_width;
2219 unsigned int tiles;
2220
2221 WARN_ON(old_offset & (tile_size - 1));
2222 WARN_ON(new_offset & (tile_size - 1));
2223 WARN_ON(new_offset > old_offset);
2224
2225 tiles = (old_offset - new_offset) / tile_size;
2226
2227 *y += tiles / pitch_tiles * tile_height;
2228 *x += tiles % pitch_tiles * tile_width;
2229
2230 /* minimize x in case it got needlessly big */
2231 *y += *x / pitch_pixels * tile_height;
2232 *x %= pitch_pixels;
2233
2234 return new_offset;
2235 }
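/*
 * Worked example (assuming 4 KiB tiles of 128x8 pixels and
 * pitch_tiles == 16): moving from old_offset == 81920 down to
 * new_offset == 0 spans 20 tiles, so y grows by 20 / 16 * 8 == 8
 * lines and x by 20 % 16 * 128 == 512 pixels.
 */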
2236
2237 static bool is_surface_linear(u64 modifier, int color_plane)
2238 {
2239 return modifier == DRM_FORMAT_MOD_LINEAR;
2240 }
2241
2242 static u32 intel_adjust_aligned_offset(int *x, int *y,
2243 const struct drm_framebuffer *fb,
2244 int color_plane,
2245 unsigned int rotation,
2246 unsigned int pitch,
2247 u32 old_offset, u32 new_offset)
2248 {
2249 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2250 unsigned int cpp = fb->format->cpp[color_plane];
2251
2252 WARN_ON(new_offset > old_offset);
2253
2254 if (!is_surface_linear(fb->modifier, color_plane)) {
2255 unsigned int tile_size, tile_width, tile_height;
2256 unsigned int pitch_tiles;
2257
2258 tile_size = intel_tile_size(dev_priv);
2259 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2260
2261 if (drm_rotation_90_or_270(rotation)) {
2262 pitch_tiles = pitch / tile_height;
2263 swap(tile_width, tile_height);
2264 } else {
2265 pitch_tiles = pitch / (tile_width * cpp);
2266 }
2267
2268 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2269 tile_size, pitch_tiles,
2270 old_offset, new_offset);
2271 } else {
2272 old_offset += *y * pitch + *x * cpp;
2273
2274 *y = (old_offset - new_offset) / pitch;
2275 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2276 }
2277
2278 return new_offset;
2279 }
2280
2281 /*
2282 * Adjust the tile offset by moving the difference into
2283 * the x/y offsets.
2284 */
2285 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2286 const struct intel_plane_state *state,
2287 int color_plane,
2288 u32 old_offset, u32 new_offset)
2289 {
2290 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2291 state->base.rotation,
2292 state->color_plane[color_plane].stride,
2293 old_offset, new_offset);
2294 }
2295
2296 /*
2297 * Computes the aligned offset to the base tile and adjusts
2298 * x, y. Bytes per pixel is assumed to be a power-of-two.
2299 *
2300 * In the 90/270 rotated case, x and y are assumed
2301 * to be already rotated to match the rotated GTT view, and
2302 * pitch is the tile_height aligned framebuffer height.
2303 *
2304 * This function is used when computing the derived information
2305 * under intel_framebuffer, so using any of that information
2306 * here is not allowed. Anything under drm_framebuffer can be
2307 * used. This is why the user has to pass in the pitch since it
2308 * is specified in the rotated orientation.
2309 */
2310 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2311 int *x, int *y,
2312 const struct drm_framebuffer *fb,
2313 int color_plane,
2314 unsigned int pitch,
2315 unsigned int rotation,
2316 u32 alignment)
2317 {
2318 unsigned int cpp = fb->format->cpp[color_plane];
2319 u32 offset, offset_aligned;
2320
2321 if (alignment)
2322 alignment--;
2323
2324 if (!is_surface_linear(fb->modifier, color_plane)) {
2325 unsigned int tile_size, tile_width, tile_height;
2326 unsigned int tile_rows, tiles, pitch_tiles;
2327
2328 tile_size = intel_tile_size(dev_priv);
2329 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2330
2331 if (drm_rotation_90_or_270(rotation)) {
2332 pitch_tiles = pitch / tile_height;
2333 swap(tile_width, tile_height);
2334 } else {
2335 pitch_tiles = pitch / (tile_width * cpp);
2336 }
2337
2338 tile_rows = *y / tile_height;
2339 *y %= tile_height;
2340
2341 tiles = *x / tile_width;
2342 *x %= tile_width;
2343
2344 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2345 offset_aligned = offset & ~alignment;
2346
2347 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2348 tile_size, pitch_tiles,
2349 offset, offset_aligned);
2350 } else {
2351 offset = *y * pitch + *x * cpp;
2352 offset_aligned = offset & ~alignment;
2353
2354 *y = (offset & alignment) / pitch;
2355 *x = ((offset & alignment) - *y * pitch) / cpp;
2356 }
2357
2358 return offset_aligned;
2359 }
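/*
 * Worked example for the linear branch above (assuming pitch == 8192,
 * cpp == 4 and 4 KiB alignment): x == 100, y == 3 gives
 * offset == 3 * 8192 + 400 == 24976, which rounds down to
 * offset_aligned == 24576 while the 400 byte remainder is folded
 * back into (x, y) == (100, 0).
 */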
2360
2361 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2362 const struct intel_plane_state *state,
2363 int color_plane)
2364 {
2365 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2366 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2367 const struct drm_framebuffer *fb = state->base.fb;
2368 unsigned int rotation = state->base.rotation;
2369 int pitch = state->color_plane[color_plane].stride;
2370 u32 alignment;
2371
2372 if (intel_plane->id == PLANE_CURSOR)
2373 alignment = intel_cursor_alignment(dev_priv);
2374 else
2375 alignment = intel_surf_alignment(fb, color_plane);
2376
2377 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2378 pitch, rotation, alignment);
2379 }
2380
2381 /* Convert fb->offsets[] into x/y offsets */
2382 static int intel_fb_offset_to_xy(int *x, int *y,
2383 const struct drm_framebuffer *fb,
2384 int color_plane)
2385 {
2386 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2387 unsigned int height;
2388
2389 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2390 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2391 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2392 fb->offsets[color_plane], color_plane);
2393 return -EINVAL;
2394 }
2395
2396 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2397 height = ALIGN(height, intel_tile_height(fb, color_plane));
2398
2399 /* Catch potential overflows early */
2400 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2401 fb->offsets[color_plane])) {
2402 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2403 fb->offsets[color_plane], fb->pitches[color_plane],
2404 color_plane);
2405 return -ERANGE;
2406 }
2407
2408 *x = 0;
2409 *y = 0;
2410
2411 intel_adjust_aligned_offset(x, y,
2412 fb, color_plane, DRM_MODE_ROTATE_0,
2413 fb->pitches[color_plane],
2414 fb->offsets[color_plane], 0);
2415
2416 return 0;
2417 }
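/*
 * Worked example (assuming a linear fb with pitches[0] == 8192,
 * cpp == 4 and offsets[0] == 16400): the adjustment above resolves
 * the byte offset into y == 16400 / 8192 == 2 and
 * x == (16400 - 2 * 8192) / 4 == 4.
 */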
2418
2419 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2420 {
2421 switch (fb_modifier) {
2422 case I915_FORMAT_MOD_X_TILED:
2423 return I915_TILING_X;
2424 case I915_FORMAT_MOD_Y_TILED:
2425 case I915_FORMAT_MOD_Y_TILED_CCS:
2426 return I915_TILING_Y;
2427 default:
2428 return I915_TILING_NONE;
2429 }
2430 }
2431
2432 /*
2433 * From the Sky Lake PRM:
2434 * "The Color Control Surface (CCS) contains the compression status of
2435 * the cache-line pairs. The compression state of the cache-line pair
2436 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2437 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2438 * cache-line-pairs. CCS is always Y tiled."
2439 *
2440 * Since cache line pairs refer to horizontally adjacent cache lines,
2441 * each cache line in the CCS corresponds to an area of 32x16 cache
2442 * lines on the main surface. Since each pixel is 4 bytes, this gives
2443 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2444 * main surface.
2445 */
2446 static const struct drm_format_info ccs_formats[] = {
2447 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2448 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2449 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2450 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2451 };
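/*
 * E.g. a 3840x2160 XRGB8888 main surface needs a CCS plane of
 * 3840 / 8 == 480 bytes per row by 2160 / 16 == 135 rows, per the
 * hsub/vsub ratios above.
 */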
2452
2453 static const struct drm_format_info *
2454 lookup_format_info(const struct drm_format_info formats[],
2455 int num_formats, u32 format)
2456 {
2457 int i;
2458
2459 for (i = 0; i < num_formats; i++) {
2460 if (formats[i].format == format)
2461 return &formats[i];
2462 }
2463
2464 return NULL;
2465 }
2466
2467 static const struct drm_format_info *
2468 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2469 {
2470 switch (cmd->modifier[0]) {
2471 case I915_FORMAT_MOD_Y_TILED_CCS:
2472 case I915_FORMAT_MOD_Yf_TILED_CCS:
2473 return lookup_format_info(ccs_formats,
2474 ARRAY_SIZE(ccs_formats),
2475 cmd->pixel_format);
2476 default:
2477 return NULL;
2478 }
2479 }
2480
2481 bool is_ccs_modifier(u64 modifier)
2482 {
2483 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2484 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2485 }
2486
2487 static int
2488 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2489 struct drm_framebuffer *fb)
2490 {
2491 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2492 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2493 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2494 u32 gtt_offset_rotated = 0;
2495 unsigned int max_size = 0;
2496 int i, num_planes = fb->format->num_planes;
2497 unsigned int tile_size = intel_tile_size(dev_priv);
2498
2499 for (i = 0; i < num_planes; i++) {
2500 unsigned int width, height;
2501 unsigned int cpp, size;
2502 u32 offset;
2503 int x, y;
2504 int ret;
2505
2506 cpp = fb->format->cpp[i];
2507 width = drm_framebuffer_plane_width(fb->width, fb, i);
2508 height = drm_framebuffer_plane_height(fb->height, fb, i);
2509
2510 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2511 if (ret) {
2512 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2513 i, fb->offsets[i]);
2514 return ret;
2515 }
2516
2517 if (is_ccs_modifier(fb->modifier) && i == 1) {
2518 int hsub = fb->format->hsub;
2519 int vsub = fb->format->vsub;
2520 int tile_width, tile_height;
2521 int main_x, main_y;
2522 int ccs_x, ccs_y;
2523
2524 intel_tile_dims(fb, i, &tile_width, &tile_height);
2525 tile_width *= hsub;
2526 tile_height *= vsub;
2527
2528 ccs_x = (x * hsub) % tile_width;
2529 ccs_y = (y * vsub) % tile_height;
2530 main_x = intel_fb->normal[0].x % tile_width;
2531 main_y = intel_fb->normal[0].y % tile_height;
2532
2533 /*
2534 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2535 * x/y offsets must match between CCS and the main surface.
2536 */
2537 if (main_x != ccs_x || main_y != ccs_y) {
2538 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2539 main_x, main_y,
2540 ccs_x, ccs_y,
2541 intel_fb->normal[0].x,
2542 intel_fb->normal[0].y,
2543 x, y);
2544 return -EINVAL;
2545 }
2546 }
2547
2548 /*
2549 * The fence (if used) is aligned to the start of the object
2550 * so having the framebuffer wrap around across the edge of the
2551 * fenced region doesn't really work. We have no API to configure
2552 * the fence start offset within the object (nor could we probably
2553 * on gen2/3). So it's easier if we just require that the
2554 * fb layout agrees with the fence layout. We already check that the
2555 * fb stride matches the fence stride elsewhere.
2556 */
2557 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2558 (x + width) * cpp > fb->pitches[i]) {
2559 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2560 i, fb->offsets[i]);
2561 return -EINVAL;
2562 }
2563
2564 /*
2565 * First pixel of the framebuffer from
2566 * the start of the normal gtt mapping.
2567 */
2568 intel_fb->normal[i].x = x;
2569 intel_fb->normal[i].y = y;
2570
2571 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2572 fb->pitches[i],
2573 DRM_MODE_ROTATE_0,
2574 tile_size);
2575 offset /= tile_size;
2576
2577 if (!is_surface_linear(fb->modifier, i)) {
2578 unsigned int tile_width, tile_height;
2579 unsigned int pitch_tiles;
2580 struct drm_rect r;
2581
2582 intel_tile_dims(fb, i, &tile_width, &tile_height);
2583
2584 rot_info->plane[i].offset = offset;
2585 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2586 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2587 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2588
2589 intel_fb->rotated[i].pitch =
2590 rot_info->plane[i].height * tile_height;
2591
2592 /* how many tiles does this plane need */
2593 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2594 /*
2595 * If the plane isn't horizontally tile aligned,
2596 * we need one more tile.
2597 */
2598 if (x != 0)
2599 size++;
2600
2601 /* rotate the x/y offsets to match the GTT view */
2602 r.x1 = x;
2603 r.y1 = y;
2604 r.x2 = x + width;
2605 r.y2 = y + height;
2606 drm_rect_rotate(&r,
2607 rot_info->plane[i].width * tile_width,
2608 rot_info->plane[i].height * tile_height,
2609 DRM_MODE_ROTATE_270);
2610 x = r.x1;
2611 y = r.y1;
2612
2613 /* rotate the tile dimensions to match the GTT view */
2614 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2615 swap(tile_width, tile_height);
2616
2617 /*
2618 * We only keep the x/y offsets, so push all of the
2619 * gtt offset into the x/y offsets.
2620 */
2621 intel_adjust_tile_offset(&x, &y,
2622 tile_width, tile_height,
2623 tile_size, pitch_tiles,
2624 gtt_offset_rotated * tile_size, 0);
2625
2626 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2627
2628 /*
2629 * First pixel of the framebuffer from
2630 * the start of the rotated gtt mapping.
2631 */
2632 intel_fb->rotated[i].x = x;
2633 intel_fb->rotated[i].y = y;
2634 } else {
2635 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2636 x * cpp, tile_size);
2637 }
2638
2639 /* how many tiles are needed in the bo in total */
2640 max_size = max(max_size, offset + size);
2641 }
2642
2643 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2644 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2645 mul_u32_u32(max_size, tile_size), obj->base.size);
2646 return -EINVAL;
2647 }
2648
2649 return 0;
2650 }
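/*
 * Worked example for the rotated view above (a sketch assuming a
 * 1024x1024 Y-tiled XRGB8888 fb with a 4096 byte pitch, i.e. 32x32
 * pixel tiles): stride == 4096 / 128 == 32 tiles, width == height ==
 * 1024 / 32 == 32 tiles, and the rotated pitch becomes
 * 32 * 32 == 1024 pixels.
 */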
2651
2652 static int i9xx_format_to_fourcc(int format)
2653 {
2654 switch (format) {
2655 case DISPPLANE_8BPP:
2656 return DRM_FORMAT_C8;
2657 case DISPPLANE_BGRX555:
2658 return DRM_FORMAT_XRGB1555;
2659 case DISPPLANE_BGRX565:
2660 return DRM_FORMAT_RGB565;
2661 default:
2662 case DISPPLANE_BGRX888:
2663 return DRM_FORMAT_XRGB8888;
2664 case DISPPLANE_RGBX888:
2665 return DRM_FORMAT_XBGR8888;
2666 case DISPPLANE_BGRX101010:
2667 return DRM_FORMAT_XRGB2101010;
2668 case DISPPLANE_RGBX101010:
2669 return DRM_FORMAT_XBGR2101010;
2670 }
2671 }
2672
2673 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2674 {
2675 switch (format) {
2676 case PLANE_CTL_FORMAT_RGB_565:
2677 return DRM_FORMAT_RGB565;
2678 case PLANE_CTL_FORMAT_NV12:
2679 return DRM_FORMAT_NV12;
2680 default:
2681 case PLANE_CTL_FORMAT_XRGB_8888:
2682 if (rgb_order) {
2683 if (alpha)
2684 return DRM_FORMAT_ABGR8888;
2685 else
2686 return DRM_FORMAT_XBGR8888;
2687 } else {
2688 if (alpha)
2689 return DRM_FORMAT_ARGB8888;
2690 else
2691 return DRM_FORMAT_XRGB8888;
2692 }
2693 case PLANE_CTL_FORMAT_XRGB_2101010:
2694 if (rgb_order)
2695 return DRM_FORMAT_XBGR2101010;
2696 else
2697 return DRM_FORMAT_XRGB2101010;
2698 }
2699 }
2700
2701 static bool
2702 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2703 struct intel_initial_plane_config *plane_config)
2704 {
2705 struct drm_device *dev = crtc->base.dev;
2706 struct drm_i915_private *dev_priv = to_i915(dev);
2707 struct drm_i915_gem_object *obj = NULL;
2708 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2709 struct drm_framebuffer *fb = &plane_config->fb->base;
2710 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2711 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2712 PAGE_SIZE);
2713
2714 size_aligned -= base_aligned;
2715
2716 if (plane_config->size == 0)
2717 return false;
2718
2719 /* If the FB is too big, just don't use it since fbdev is not very
2720 * important and we should probably use that space with FBC or other
2721 * features. */
2722 if (size_aligned * 2 > dev_priv->stolen_usable_size)
2723 return false;
2724
2725 switch (fb->modifier) {
2726 case DRM_FORMAT_MOD_LINEAR:
2727 case I915_FORMAT_MOD_X_TILED:
2728 case I915_FORMAT_MOD_Y_TILED:
2729 break;
2730 default:
2731 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2732 fb->modifier);
2733 return false;
2734 }
2735
2736 mutex_lock(&dev->struct_mutex);
2737 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2738 base_aligned,
2739 base_aligned,
2740 size_aligned);
2741 mutex_unlock(&dev->struct_mutex);
2742 if (!obj)
2743 return false;
2744
2745 switch (plane_config->tiling) {
2746 case I915_TILING_NONE:
2747 break;
2748 case I915_TILING_X:
2749 case I915_TILING_Y:
2750 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2751 break;
2752 default:
2753 MISSING_CASE(plane_config->tiling);
2754 goto out_unref_obj;
2755 }
2756
2757 mode_cmd.pixel_format = fb->format->format;
2758 mode_cmd.width = fb->width;
2759 mode_cmd.height = fb->height;
2760 mode_cmd.pitches[0] = fb->pitches[0];
2761 mode_cmd.modifier[0] = fb->modifier;
2762 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2763
2764 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2765 DRM_DEBUG_KMS("intel fb init failed\n");
2766 goto out_unref_obj;
2767 }
2768
2770 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2771 return true;
2772
2773 out_unref_obj:
2774 i915_gem_object_put(obj);
2775 return false;
2776 }
2777
2778 static void
2779 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2780 struct intel_plane_state *plane_state,
2781 bool visible)
2782 {
2783 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2784
2785 plane_state->base.visible = visible;
2786
2787 if (visible)
2788 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2789 else
2790 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2791 }
2792
2793 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
2794 {
2795 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2796 struct drm_plane *plane;
2797
2798 /*
2799 * Bits in active_planes alias if multiple "primary" or cursor planes
2800 * have been used on the same (or wrong) pipe. plane_mask uses
2801 * unique ids, hence we can use that to reconstruct active_planes.
2802 */
2803 crtc_state->active_planes = 0;
2804
2805 drm_for_each_plane_mask(plane, &dev_priv->drm,
2806 crtc_state->base.plane_mask)
2807 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2808 }
2809
2810 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2811 struct intel_plane *plane)
2812 {
2813 struct intel_crtc_state *crtc_state =
2814 to_intel_crtc_state(crtc->base.state);
2815 struct intel_plane_state *plane_state =
2816 to_intel_plane_state(plane->base.state);
2817
2818 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2819 plane->base.base.id, plane->base.name,
2820 crtc->base.base.id, crtc->base.name);
2821
2822 intel_set_plane_visible(crtc_state, plane_state, false);
2823 fixup_active_planes(crtc_state);
2824
2825 if (plane->id == PLANE_PRIMARY)
2826 intel_pre_disable_primary_noatomic(&crtc->base);
2827
2828 trace_intel_disable_plane(&plane->base, crtc);
2829 plane->disable_plane(plane, crtc_state);
2830 }
2831
2832 static void
2833 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2834 struct intel_initial_plane_config *plane_config)
2835 {
2836 struct drm_device *dev = intel_crtc->base.dev;
2837 struct drm_i915_private *dev_priv = to_i915(dev);
2838 struct drm_crtc *c;
2839 struct drm_i915_gem_object *obj;
2840 struct drm_plane *primary = intel_crtc->base.primary;
2841 struct drm_plane_state *plane_state = primary->state;
2842 struct intel_plane *intel_plane = to_intel_plane(primary);
2843 struct intel_plane_state *intel_state =
2844 to_intel_plane_state(plane_state);
2845 struct drm_framebuffer *fb;
2846
2847 if (!plane_config->fb)
2848 return;
2849
2850 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2851 fb = &plane_config->fb->base;
2852 goto valid_fb;
2853 }
2854
2855 kfree(plane_config->fb);
2856
2857 /*
2858 * Failed to alloc the obj; check whether we should share
2859 * an fb with another CRTC instead.
2860 */
2861 for_each_crtc(dev, c) {
2862 struct intel_plane_state *state;
2863
2864 if (c == &intel_crtc->base)
2865 continue;
2866
2867 if (!to_intel_crtc(c)->active)
2868 continue;
2869
2870 state = to_intel_plane_state(c->primary->state);
2871 if (!state->vma)
2872 continue;
2873
2874 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2875 fb = state->base.fb;
2876 drm_framebuffer_get(fb);
2877 goto valid_fb;
2878 }
2879 }
2880
2881 /*
2882 * We've failed to reconstruct the BIOS FB. Current display state
2883 * indicates that the primary plane is visible, but has a NULL FB,
2884 * which will lead to problems later if we don't fix it up. The
2885 * simplest solution is to just disable the primary plane now and
2886 * pretend the BIOS never had it enabled.
2887 */
2888 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2889
2890 return;
2891
2892 valid_fb:
2893 intel_state->base.rotation = plane_config->rotation;
2894 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2895 intel_state->base.rotation);
2896 intel_state->color_plane[0].stride =
2897 intel_fb_pitch(fb, 0, intel_state->base.rotation);
2898
2899 mutex_lock(&dev->struct_mutex);
2900 intel_state->vma =
2901 intel_pin_and_fence_fb_obj(fb,
2902 &intel_state->view,
2903 intel_plane_uses_fence(intel_state),
2904 &intel_state->flags);
2905 mutex_unlock(&dev->struct_mutex);
2906 if (IS_ERR(intel_state->vma)) {
2907 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2908 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2909
2910 intel_state->vma = NULL;
2911 drm_framebuffer_put(fb);
2912 return;
2913 }
2914
2915 obj = intel_fb_obj(fb);
2916 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
2917
2918 plane_state->src_x = 0;
2919 plane_state->src_y = 0;
2920 plane_state->src_w = fb->width << 16;
2921 plane_state->src_h = fb->height << 16;
2922
2923 plane_state->crtc_x = 0;
2924 plane_state->crtc_y = 0;
2925 plane_state->crtc_w = fb->width;
2926 plane_state->crtc_h = fb->height;
2927
2928 intel_state->base.src = drm_plane_state_src(plane_state);
2929 intel_state->base.dst = drm_plane_state_dest(plane_state);
2930
2931 if (i915_gem_object_is_tiled(obj))
2932 dev_priv->preserve_bios_swizzle = true;
2933
2934 plane_state->fb = fb;
2935 plane_state->crtc = &intel_crtc->base;
2936
2937 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2938 &obj->frontbuffer_bits);
2939 }
2940
2941 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2942 int color_plane,
2943 unsigned int rotation)
2944 {
2945 int cpp = fb->format->cpp[color_plane];
2946
2947 switch (fb->modifier) {
2948 case DRM_FORMAT_MOD_LINEAR:
2949 case I915_FORMAT_MOD_X_TILED:
2950 switch (cpp) {
2951 case 8:
2952 return 4096;
2953 case 4:
2954 case 2:
2955 case 1:
2956 return 8192;
2957 default:
2958 MISSING_CASE(cpp);
2959 break;
2960 }
2961 break;
2962 case I915_FORMAT_MOD_Y_TILED_CCS:
2963 case I915_FORMAT_MOD_Yf_TILED_CCS:
2964 /* FIXME AUX plane? */
2965 case I915_FORMAT_MOD_Y_TILED:
2966 case I915_FORMAT_MOD_Yf_TILED:
2967 switch (cpp) {
2968 case 8:
2969 return 2048;
2970 case 4:
2971 return 4096;
2972 case 2:
2973 case 1:
2974 return 8192;
2975 default:
2976 MISSING_CASE(cpp);
2977 break;
2978 }
2979 break;
2980 default:
2981 MISSING_CASE(fb->modifier);
2982 }
2983
2984 return 2048;
2985 }
2986
2987 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
2988 int main_x, int main_y, u32 main_offset)
2989 {
2990 const struct drm_framebuffer *fb = plane_state->base.fb;
2991 int hsub = fb->format->hsub;
2992 int vsub = fb->format->vsub;
2993 int aux_x = plane_state->color_plane[1].x;
2994 int aux_y = plane_state->color_plane[1].y;
2995 u32 aux_offset = plane_state->color_plane[1].offset;
2996 u32 alignment = intel_surf_alignment(fb, 1);
2997
2998 while (aux_offset >= main_offset && aux_y <= main_y) {
2999 int x, y;
3000
3001 if (aux_x == main_x && aux_y == main_y)
3002 break;
3003
3004 if (aux_offset == 0)
3005 break;
3006
3007 x = aux_x / hsub;
3008 y = aux_y / vsub;
3009 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3010 aux_offset, aux_offset - alignment);
3011 aux_x = x * hsub + aux_x % hsub;
3012 aux_y = y * vsub + aux_y % vsub;
3013 }
3014
3015 if (aux_x != main_x || aux_y != main_y)
3016 return false;
3017
3018 plane_state->color_plane[1].offset = aux_offset;
3019 plane_state->color_plane[1].x = aux_x;
3020 plane_state->color_plane[1].y = aux_y;
3021
3022 return true;
3023 }
3024
3025 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3026 {
3027 const struct drm_framebuffer *fb = plane_state->base.fb;
3028 unsigned int rotation = plane_state->base.rotation;
3029 int x = plane_state->base.src.x1 >> 16;
3030 int y = plane_state->base.src.y1 >> 16;
3031 int w = drm_rect_width(&plane_state->base.src) >> 16;
3032 int h = drm_rect_height(&plane_state->base.src) >> 16;
3033 int max_width = skl_max_plane_width(fb, 0, rotation);
3034 int max_height = 4096;
3035 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3036
3037 if (w > max_width || h > max_height) {
3038 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3039 w, h, max_width, max_height);
3040 return -EINVAL;
3041 }
3042
3043 intel_add_fb_offsets(&x, &y, plane_state, 0);
3044 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3045 alignment = intel_surf_alignment(fb, 0);
3046
3047 /*
3048 * AUX surface offset is specified as the distance from the
3049 * main surface offset, and it must be non-negative. Make
3050 * sure that is what we will get.
3051 */
3052 if (offset > aux_offset)
3053 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3054 offset, aux_offset & ~(alignment - 1));
3055
3056 /*
3057 * When using an X-tiled surface, the plane blows up
3058 * if the x offset + width exceeds the stride.
3059 *
3060 * TODO: linear and Y-tiled seem fine, Yf untested.
3061 */
3062 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3063 int cpp = fb->format->cpp[0];
3064
3065 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3066 if (offset == 0) {
3067 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3068 return -EINVAL;
3069 }
3070
3071 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3072 offset, offset - alignment);
3073 }
3074 }
3075
3076 /*
3077 * The CCS AUX surface doesn't have its own x/y offsets, so we must
3078 * make sure they match the main surface x/y offsets.
3079 */
3080 if (is_ccs_modifier(fb->modifier)) {
3081 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3082 if (offset == 0)
3083 break;
3084
3085 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3086 offset, offset - alignment);
3087 }
3088
3089 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3090 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3091 return -EINVAL;
3092 }
3093 }
3094
3095 plane_state->color_plane[0].offset = offset;
3096 plane_state->color_plane[0].x = x;
3097 plane_state->color_plane[0].y = y;
3098
3099 return 0;
3100 }
3101
3102 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3103 {
3104 const struct drm_framebuffer *fb = plane_state->base.fb;
3105 unsigned int rotation = plane_state->base.rotation;
3106 int max_width = skl_max_plane_width(fb, 1, rotation);
3107 int max_height = 4096;
3108 int x = plane_state->base.src.x1 >> 17;
3109 int y = plane_state->base.src.y1 >> 17;
3110 int w = drm_rect_width(&plane_state->base.src) >> 17;
3111 int h = drm_rect_height(&plane_state->base.src) >> 17;
3112 u32 offset;
3113
3114 intel_add_fb_offsets(&x, &y, plane_state, 1);
3115 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3116
3117 /* FIXME not quite sure how/if these apply to the chroma plane */
3118 if (w > max_width || h > max_height) {
3119 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3120 w, h, max_width, max_height);
3121 return -EINVAL;
3122 }
3123
3124 plane_state->color_plane[1].offset = offset;
3125 plane_state->color_plane[1].x = x;
3126 plane_state->color_plane[1].y = y;
3127
3128 return 0;
3129 }
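/*
 * Note on the ">> 17" above: the src coordinates are 16.16 fixed
 * point, so ">> 16" would yield whole luma pixels and the extra bit
 * halves them for the 4:2:0 chroma plane. E.g. src.x1 == 256 << 16
 * becomes x == 128.
 */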
3130
3131 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3132 {
3133 const struct drm_framebuffer *fb = plane_state->base.fb;
3134 int src_x = plane_state->base.src.x1 >> 16;
3135 int src_y = plane_state->base.src.y1 >> 16;
3136 int hsub = fb->format->hsub;
3137 int vsub = fb->format->vsub;
3138 int x = src_x / hsub;
3139 int y = src_y / vsub;
3140 u32 offset;
3141
3142 intel_add_fb_offsets(&x, &y, plane_state, 1);
3143 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3144
3145 plane_state->color_plane[1].offset = offset;
3146 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3147 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3148
3149 return 0;
3150 }
3151
3152 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3153 {
3154 const struct drm_framebuffer *fb = plane_state->base.fb;
3155 unsigned int rotation = plane_state->base.rotation;
3156 int ret;
3157
3158 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3159 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3160 plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
3161
3162 ret = intel_plane_check_stride(plane_state);
3163 if (ret)
3164 return ret;
3165
3166 if (!plane_state->base.visible)
3167 return 0;
3168
3169 /* Rotate src coordinates to match rotated GTT view */
3170 if (drm_rotation_90_or_270(rotation))
3171 drm_rect_rotate(&plane_state->base.src,
3172 fb->width << 16, fb->height << 16,
3173 DRM_MODE_ROTATE_270);
3174
3175 /*
3176 * Handle the AUX surface first since
3177 * the main surface setup depends on it.
3178 */
3179 if (fb->format->format == DRM_FORMAT_NV12) {
3180 ret = skl_check_nv12_aux_surface(plane_state);
3181 if (ret)
3182 return ret;
3183 } else if (is_ccs_modifier(fb->modifier)) {
3184 ret = skl_check_ccs_aux_surface(plane_state);
3185 if (ret)
3186 return ret;
3187 } else {
3188 plane_state->color_plane[1].offset = ~0xfff;
3189 plane_state->color_plane[1].x = 0;
3190 plane_state->color_plane[1].y = 0;
3191 }
3192
3193 ret = skl_check_main_surface(plane_state);
3194 if (ret)
3195 return ret;
3196
3197 return 0;
3198 }
3199
3200 unsigned int
3201 i9xx_plane_max_stride(struct intel_plane *plane,
3202 u32 pixel_format, u64 modifier,
3203 unsigned int rotation)
3204 {
3205 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3206
3207 if (!HAS_GMCH(dev_priv)) {
3208 return 32*1024;
3209 } else if (INTEL_GEN(dev_priv) >= 4) {
3210 if (modifier == I915_FORMAT_MOD_X_TILED)
3211 return 16*1024;
3212 else
3213 return 32*1024;
3214 } else if (INTEL_GEN(dev_priv) >= 3) {
3215 if (modifier == I915_FORMAT_MOD_X_TILED)
3216 return 8*1024;
3217 else
3218 return 16*1024;
3219 } else {
3220 if (plane->i9xx_plane == PLANE_C)
3221 return 4*1024;
3222 else
3223 return 8*1024;
3224 }
3225 }
3226
3227 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3228 {
3229 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3230 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3231 u32 dspcntr = 0;
3232
3233 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3234
3235 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3236 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3237
3238 if (INTEL_GEN(dev_priv) < 5)
3239 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3240
3241 return dspcntr;
3242 }
3243
3244 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3245 const struct intel_plane_state *plane_state)
3246 {
3247 struct drm_i915_private *dev_priv =
3248 to_i915(plane_state->base.plane->dev);
3249 const struct drm_framebuffer *fb = plane_state->base.fb;
3250 unsigned int rotation = plane_state->base.rotation;
3251 u32 dspcntr;
3252
3253 dspcntr = DISPLAY_PLANE_ENABLE;
3254
3255 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3256 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3257 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3258
3259 switch (fb->format->format) {
3260 case DRM_FORMAT_C8:
3261 dspcntr |= DISPPLANE_8BPP;
3262 break;
3263 case DRM_FORMAT_XRGB1555:
3264 dspcntr |= DISPPLANE_BGRX555;
3265 break;
3266 case DRM_FORMAT_RGB565:
3267 dspcntr |= DISPPLANE_BGRX565;
3268 break;
3269 case DRM_FORMAT_XRGB8888:
3270 dspcntr |= DISPPLANE_BGRX888;
3271 break;
3272 case DRM_FORMAT_XBGR8888:
3273 dspcntr |= DISPPLANE_RGBX888;
3274 break;
3275 case DRM_FORMAT_XRGB2101010:
3276 dspcntr |= DISPPLANE_BGRX101010;
3277 break;
3278 case DRM_FORMAT_XBGR2101010:
3279 dspcntr |= DISPPLANE_RGBX101010;
3280 break;
3281 default:
3282 MISSING_CASE(fb->format->format);
3283 return 0;
3284 }
3285
3286 if (INTEL_GEN(dev_priv) >= 4 &&
3287 fb->modifier == I915_FORMAT_MOD_X_TILED)
3288 dspcntr |= DISPPLANE_TILED;
3289
3290 if (rotation & DRM_MODE_ROTATE_180)
3291 dspcntr |= DISPPLANE_ROTATE_180;
3292
3293 if (rotation & DRM_MODE_REFLECT_X)
3294 dspcntr |= DISPPLANE_MIRROR;
3295
3296 return dspcntr;
3297 }
3298
3299 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3300 {
3301 struct drm_i915_private *dev_priv =
3302 to_i915(plane_state->base.plane->dev);
3303 const struct drm_framebuffer *fb = plane_state->base.fb;
3304 unsigned int rotation = plane_state->base.rotation;
3305 int src_x = plane_state->base.src.x1 >> 16;
3306 int src_y = plane_state->base.src.y1 >> 16;
3307 u32 offset;
3308 int ret;
3309
3310 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3311 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3312
3313 ret = intel_plane_check_stride(plane_state);
3314 if (ret)
3315 return ret;
3316
3317 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3318
3319 if (INTEL_GEN(dev_priv) >= 4)
3320 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3321 plane_state, 0);
3322 else
3323 offset = 0;
3324
3325 /* HSW/BDW do this automagically in hardware */
3326 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3327 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3328 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3329
3330 if (rotation & DRM_MODE_ROTATE_180) {
3331 src_x += src_w - 1;
3332 src_y += src_h - 1;
3333 } else if (rotation & DRM_MODE_REFLECT_X) {
3334 src_x += src_w - 1;
3335 }
3336 }
3337
3338 plane_state->color_plane[0].offset = offset;
3339 plane_state->color_plane[0].x = src_x;
3340 plane_state->color_plane[0].y = src_y;
3341
3342 return 0;
3343 }
3344
3345 static int
3346 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3347 struct intel_plane_state *plane_state)
3348 {
3349 int ret;
3350
3351 ret = chv_plane_check_rotation(plane_state);
3352 if (ret)
3353 return ret;
3354
3355 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3356 &crtc_state->base,
3357 DRM_PLANE_HELPER_NO_SCALING,
3358 DRM_PLANE_HELPER_NO_SCALING,
3359 false, true);
3360 if (ret)
3361 return ret;
3362
3363 if (!plane_state->base.visible)
3364 return 0;
3365
3366 ret = intel_plane_check_src_coordinates(plane_state);
3367 if (ret)
3368 return ret;
3369
3370 ret = i9xx_check_plane_surface(plane_state);
3371 if (ret)
3372 return ret;
3373
3374 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3375
3376 return 0;
3377 }
3378
3379 static void i9xx_update_plane(struct intel_plane *plane,
3380 const struct intel_crtc_state *crtc_state,
3381 const struct intel_plane_state *plane_state)
3382 {
3383 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3384 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3385 u32 linear_offset;
3386 int x = plane_state->color_plane[0].x;
3387 int y = plane_state->color_plane[0].y;
3388 unsigned long irqflags;
3389 u32 dspaddr_offset;
3390 u32 dspcntr;
3391
3392 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3393
3394 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3395
3396 if (INTEL_GEN(dev_priv) >= 4)
3397 dspaddr_offset = plane_state->color_plane[0].offset;
3398 else
3399 dspaddr_offset = linear_offset;
3400
3401 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3402
3403 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3404
3405 if (INTEL_GEN(dev_priv) < 4) {
3406 /* pipesrc and dspsize control the size that is scaled from,
3407 * which should always be the user's requested size.
3408 */
3409 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3410 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3411 ((crtc_state->pipe_src_h - 1) << 16) |
3412 (crtc_state->pipe_src_w - 1));
3413 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3414 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3415 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3416 ((crtc_state->pipe_src_h - 1) << 16) |
3417 (crtc_state->pipe_src_w - 1));
3418 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3419 }
3420
3421 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3422 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3423 } else if (INTEL_GEN(dev_priv) >= 4) {
3424 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3425 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3426 }
3427
3428 /*
3429 * The control register self-arms if the plane was previously
3430 * disabled. Try to make the plane enable atomic by writing
3431 * the control register just before the surface register.
3432 */
3433 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3434 if (INTEL_GEN(dev_priv) >= 4)
3435 I915_WRITE_FW(DSPSURF(i9xx_plane),
3436 intel_plane_ggtt_offset(plane_state) +
3437 dspaddr_offset);
3438 else
3439 I915_WRITE_FW(DSPADDR(i9xx_plane),
3440 intel_plane_ggtt_offset(plane_state) +
3441 dspaddr_offset);
3442
3443 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3444 }
3445
3446 static void i9xx_disable_plane(struct intel_plane *plane,
3447 const struct intel_crtc_state *crtc_state)
3448 {
3449 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3450 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3451 unsigned long irqflags;
3452 u32 dspcntr;
3453
3454 /*
3455 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3456 * enable on ilk+ affect the pipe bottom color as
3457 * well, so we must configure them even if the plane
3458 * is disabled.
3459 *
3460 * On pre-g4x there is no way to gamma correct the
3461 * pipe bottom color but we'll keep on doing this
3462 * anyway.
3463 */
3464 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3465
3466 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3467
3468 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3469 if (INTEL_GEN(dev_priv) >= 4)
3470 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3471 else
3472 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3473
3474 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3475 }
3476
3477 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3478 enum pipe *pipe)
3479 {
3480 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3481 enum intel_display_power_domain power_domain;
3482 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3483 intel_wakeref_t wakeref;
3484 bool ret;
3485 u32 val;
3486
3487 /*
3488 * Not 100% correct for planes that can move between pipes,
3489 * but that's only the case for gen2-4 which don't have any
3490 * display power wells.
3491 */
3492 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3493 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3494 if (!wakeref)
3495 return false;
3496
3497 val = I915_READ(DSPCNTR(i9xx_plane));
3498
3499 ret = val & DISPLAY_PLANE_ENABLE;
3500
3501 if (INTEL_GEN(dev_priv) >= 5)
3502 *pipe = plane->pipe;
3503 else
3504 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3505 DISPPLANE_SEL_PIPE_SHIFT;
3506
3507 intel_display_power_put(dev_priv, power_domain, wakeref);
3508
3509 return ret;
3510 }
3511
3512 static u32
3513 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3514 {
3515 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3516 return 64;
3517 else
3518 return intel_tile_width_bytes(fb, color_plane);
3519 }
3520
3521 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3522 {
3523 struct drm_device *dev = intel_crtc->base.dev;
3524 struct drm_i915_private *dev_priv = to_i915(dev);
3525
3526 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3527 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3528 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3529 }
3530
3531 /*
3532 * This function detaches (i.e. unbinds) unused scalers in hardware.
3533 */
3534 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3535 {
3536 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3537 const struct intel_crtc_scaler_state *scaler_state =
3538 &crtc_state->scaler_state;
3539 int i;
3540
3541 /* loop through and disable scalers that aren't in use */
3542 for (i = 0; i < intel_crtc->num_scalers; i++) {
3543 if (!scaler_state->scalers[i].in_use)
3544 skl_detach_scaler(intel_crtc, i);
3545 }
3546 }
3547
3548 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3549 int color_plane, unsigned int rotation)
3550 {
3551 /*
3552 * The stride is either expressed as a multiple of 64 bytes chunks for
3553 * linear buffers or in number of tiles for tiled buffers.
3554 */
3555 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3556 return 64;
3557 else if (drm_rotation_90_or_270(rotation))
3558 return intel_tile_height(fb, color_plane);
3559 else
3560 return intel_tile_width_bytes(fb, color_plane);
3561 }
3562
3563 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3564 int color_plane)
3565 {
3566 const struct drm_framebuffer *fb = plane_state->base.fb;
3567 unsigned int rotation = plane_state->base.rotation;
3568 u32 stride = plane_state->color_plane[color_plane].stride;
3569
3570 if (color_plane >= fb->format->num_planes)
3571 return 0;
3572
3573 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3574 }
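/*
 * Worked example (assuming a 3840 pixel wide XRGB8888 fb, i.e. a
 * 15360 byte pitch): linear yields 15360 / 64 == 240, while Y-tiled
 * (128 byte wide tiles) yields 15360 / 128 == 120 tiles, matching
 * the two encodings described in skl_plane_stride_mult().
 */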
3575
3576 static u32 skl_plane_ctl_format(u32 pixel_format)
3577 {
3578 switch (pixel_format) {
3579 case DRM_FORMAT_C8:
3580 return PLANE_CTL_FORMAT_INDEXED;
3581 case DRM_FORMAT_RGB565:
3582 return PLANE_CTL_FORMAT_RGB_565;
3583 case DRM_FORMAT_XBGR8888:
3584 case DRM_FORMAT_ABGR8888:
3585 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3586 case DRM_FORMAT_XRGB8888:
3587 case DRM_FORMAT_ARGB8888:
3588 return PLANE_CTL_FORMAT_XRGB_8888;
3589 case DRM_FORMAT_XRGB2101010:
3590 return PLANE_CTL_FORMAT_XRGB_2101010;
3591 case DRM_FORMAT_XBGR2101010:
3592 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3593 case DRM_FORMAT_YUYV:
3594 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3595 case DRM_FORMAT_YVYU:
3596 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3597 case DRM_FORMAT_UYVY:
3598 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3599 case DRM_FORMAT_VYUY:
3600 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3601 case DRM_FORMAT_NV12:
3602 return PLANE_CTL_FORMAT_NV12;
3603 default:
3604 MISSING_CASE(pixel_format);
3605 }
3606
3607 return 0;
3608 }
3609
3610 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3611 {
3612 if (!plane_state->base.fb->format->has_alpha)
3613 return PLANE_CTL_ALPHA_DISABLE;
3614
3615 switch (plane_state->base.pixel_blend_mode) {
3616 case DRM_MODE_BLEND_PIXEL_NONE:
3617 return PLANE_CTL_ALPHA_DISABLE;
3618 case DRM_MODE_BLEND_PREMULTI:
3619 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3620 case DRM_MODE_BLEND_COVERAGE:
3621 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3622 default:
3623 MISSING_CASE(plane_state->base.pixel_blend_mode);
3624 return PLANE_CTL_ALPHA_DISABLE;
3625 }
3626 }
3627
3628 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3629 {
3630 if (!plane_state->base.fb->format->has_alpha)
3631 return PLANE_COLOR_ALPHA_DISABLE;
3632
3633 switch (plane_state->base.pixel_blend_mode) {
3634 case DRM_MODE_BLEND_PIXEL_NONE:
3635 return PLANE_COLOR_ALPHA_DISABLE;
3636 case DRM_MODE_BLEND_PREMULTI:
3637 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3638 case DRM_MODE_BLEND_COVERAGE:
3639 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3640 default:
3641 MISSING_CASE(plane_state->base.pixel_blend_mode);
3642 return PLANE_COLOR_ALPHA_DISABLE;
3643 }
3644 }
3645
3646 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
3647 {
3648 switch (fb_modifier) {
3649 case DRM_FORMAT_MOD_LINEAR:
3650 break;
3651 case I915_FORMAT_MOD_X_TILED:
3652 return PLANE_CTL_TILED_X;
3653 case I915_FORMAT_MOD_Y_TILED:
3654 return PLANE_CTL_TILED_Y;
3655 case I915_FORMAT_MOD_Y_TILED_CCS:
3656 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3657 case I915_FORMAT_MOD_Yf_TILED:
3658 return PLANE_CTL_TILED_YF;
3659 case I915_FORMAT_MOD_Yf_TILED_CCS:
3660 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3661 default:
3662 MISSING_CASE(fb_modifier);
3663 }
3664
3665 return 0;
3666 }
3667
3668 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3669 {
3670 switch (rotate) {
3671 case DRM_MODE_ROTATE_0:
3672 break;
3673 /*
3674 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
3675 * while i915 HW rotation is clockwise; that's why 90 and 270 are swapped here.
3676 */
3677 case DRM_MODE_ROTATE_90:
3678 return PLANE_CTL_ROTATE_270;
3679 case DRM_MODE_ROTATE_180:
3680 return PLANE_CTL_ROTATE_180;
3681 case DRM_MODE_ROTATE_270:
3682 return PLANE_CTL_ROTATE_90;
3683 default:
3684 MISSING_CASE(rotate);
3685 }
3686
3687 return 0;
3688 }
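
/*
 * For example, a plane rotated DRM_MODE_ROTATE_90 (90 degrees
 * counter-clockwise in DRM terms) is programmed with
 * PLANE_CTL_ROTATE_270, since 90 degrees CCW is 270 degrees CW from
 * the hardware's point of view.
 */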
3689
3690 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3691 {
3692 switch (reflect) {
3693 case 0:
3694 break;
3695 case DRM_MODE_REFLECT_X:
3696 return PLANE_CTL_FLIP_HORIZONTAL;
3697 case DRM_MODE_REFLECT_Y:
3698 default:
3699 MISSING_CASE(reflect);
3700 }
3701
3702 return 0;
3703 }
3704
3705 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3706 {
3707 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3708 u32 plane_ctl = 0;
3709
3710 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3711 return plane_ctl;
3712
3713 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
3714 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
3715
3716 return plane_ctl;
3717 }
3718
3719 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3720 const struct intel_plane_state *plane_state)
3721 {
3722 struct drm_i915_private *dev_priv =
3723 to_i915(plane_state->base.plane->dev);
3724 const struct drm_framebuffer *fb = plane_state->base.fb;
3725 unsigned int rotation = plane_state->base.rotation;
3726 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
3727 u32 plane_ctl;
3728
3729 plane_ctl = PLANE_CTL_ENABLE;
3730
3731 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3732 plane_ctl |= skl_plane_ctl_alpha(plane_state);
3733 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3734
3735 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3736 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
3737
3738 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3739 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
3740 }
3741
3742 plane_ctl |= skl_plane_ctl_format(fb->format->format);
3743 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3744 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
3745
3746 if (INTEL_GEN(dev_priv) >= 10)
3747 plane_ctl |= cnl_plane_ctl_flip(rotation &
3748 DRM_MODE_REFLECT_MASK);
3749
3750 if (key->flags & I915_SET_COLORKEY_DESTINATION)
3751 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
3752 else if (key->flags & I915_SET_COLORKEY_SOURCE)
3753 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
3754
3755 return plane_ctl;
3756 }
3757
3758 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3759 {
3760 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3761 u32 plane_color_ctl = 0;
3762
3763 if (INTEL_GEN(dev_priv) >= 11)
3764 return plane_color_ctl;
3765
3766 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3767 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3768
3769 return plane_color_ctl;
3770 }
3771
3772 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3773 const struct intel_plane_state *plane_state)
3774 {
3775 const struct drm_framebuffer *fb = plane_state->base.fb;
3776 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3777 u32 plane_color_ctl = 0;
3778
3779 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3780 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
3781
3782 if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
3783 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3784 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3785 else
3786 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
3787
3788 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3789 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3790 } else if (fb->format->is_yuv) {
3791 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
3792 }
3793
3794 return plane_color_ctl;
3795 }
3796
3797 static int
3798 __intel_display_resume(struct drm_device *dev,
3799 struct drm_atomic_state *state,
3800 struct drm_modeset_acquire_ctx *ctx)
3801 {
3802 struct drm_crtc_state *crtc_state;
3803 struct drm_crtc *crtc;
3804 int i, ret;
3805
3806 intel_modeset_setup_hw_state(dev, ctx);
3807 i915_redisable_vga(to_i915(dev));
3808
3809 if (!state)
3810 return 0;
3811
3812 /*
3813 * We've duplicated the state; pointers to the old state are invalid.
3814 *
3815 * Don't attempt to use the old state until we commit the duplicated state.
3816 */
3817 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
3818 /*
3819 * Force recalculation even if we restore
3820 * current state. With fast modeset this may not result
3821 * in a modeset when the state is compatible.
3822 */
3823 crtc_state->mode_changed = true;
3824 }
3825
3826 /* ignore any reset values/BIOS leftovers in the WM registers */
3827 if (!HAS_GMCH(to_i915(dev)))
3828 to_intel_atomic_state(state)->skip_intermediate_wm = true;
3829
3830 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
3831
3832 WARN_ON(ret == -EDEADLK);
3833 return ret;
3834 }
3835
3836 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3837 {
3838 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3839 intel_has_gpu_reset(dev_priv));
3840 }
3841
3842 void intel_prepare_reset(struct drm_i915_private *dev_priv)
3843 {
3844 struct drm_device *dev = &dev_priv->drm;
3845 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3846 struct drm_atomic_state *state;
3847 int ret;
3848
3849 /* reset doesn't touch the display */
3850 if (!i915_modparams.force_reset_modeset_test &&
3851 !gpu_reset_clobbers_display(dev_priv))
3852 return;
3853
3854 /* We have a modeset vs reset deadlock, defensively unbreak it. */
3855 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
3856 wake_up_all(&dev_priv->gpu_error.wait_queue);
3857
3858 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
3859 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
3860 i915_gem_set_wedged(dev_priv);
3861 }
3862
3863 /*
3864 * Need mode_config.mutex so that we don't
3865 * trample ongoing ->detect() and whatnot.
3866 */
3867 mutex_lock(&dev->mode_config.mutex);
3868 drm_modeset_acquire_init(ctx, 0);
3869 while (1) {
3870 ret = drm_modeset_lock_all_ctx(dev, ctx);
3871 if (ret != -EDEADLK)
3872 break;
3873
3874 drm_modeset_backoff(ctx);
3875 }
3876 /*
3877 * Disabling the crtcs gracefully seems nicer. Also the
3878 * g33 docs say we should at least disable all the planes.
3879 */
3880 state = drm_atomic_helper_duplicate_state(dev, ctx);
3881 if (IS_ERR(state)) {
3882 ret = PTR_ERR(state);
3883 DRM_ERROR("Duplicating state failed with %i\n", ret);
3884 return;
3885 }
3886
3887 ret = drm_atomic_helper_disable_all(dev, ctx);
3888 if (ret) {
3889 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
3890 drm_atomic_state_put(state);
3891 return;
3892 }
3893
3894 dev_priv->modeset_restore_state = state;
3895 state->acquire_ctx = ctx;
3896 }
3897
3898 void intel_finish_reset(struct drm_i915_private *dev_priv)
3899 {
3900 struct drm_device *dev = &dev_priv->drm;
3901 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3902 struct drm_atomic_state *state;
3903 int ret;
3904
3905 /* reset doesn't touch the display */
3906 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
3907 return;
3908
3909 state = fetch_and_zero(&dev_priv->modeset_restore_state);
3910 if (!state)
3911 goto unlock;
3912
3913 /* reset doesn't touch the display */
3914 if (!gpu_reset_clobbers_display(dev_priv)) {
3915 /* for testing only: restore the display */
3916 ret = __intel_display_resume(dev, state, ctx);
3917 if (ret)
3918 DRM_ERROR("Restoring old state failed with %i\n", ret);
3919 } else {
3920 /*
3921 * The display has been reset as well,
3922 * so we need a full re-initialization.
3923 */
3924 intel_runtime_pm_disable_interrupts(dev_priv);
3925 intel_runtime_pm_enable_interrupts(dev_priv);
3926
3927 intel_pps_unlock_regs_wa(dev_priv);
3928 intel_modeset_init_hw(dev);
3929 intel_init_clock_gating(dev_priv);
3930
3931 spin_lock_irq(&dev_priv->irq_lock);
3932 if (dev_priv->display.hpd_irq_setup)
3933 dev_priv->display.hpd_irq_setup(dev_priv);
3934 spin_unlock_irq(&dev_priv->irq_lock);
3935
3936 ret = __intel_display_resume(dev, state, ctx);
3937 if (ret)
3938 DRM_ERROR("Restoring old state failed with %i\n", ret);
3939
3940 intel_hpd_init(dev_priv);
3941 }
3942
3943 drm_atomic_state_put(state);
3944 unlock:
3945 drm_modeset_drop_locks(ctx);
3946 drm_modeset_acquire_fini(ctx);
3947 mutex_unlock(&dev->mode_config.mutex);
3948
3949 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
3950 }
3951
3952 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
3953 {
3954 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3955 enum pipe pipe = crtc->pipe;
3956 u32 tmp;
3957
3958 tmp = I915_READ(PIPE_CHICKEN(pipe));
3959
3960 /*
3961 * Display WA #1153: icl
3962 * enable hardware to bypass the alpha math
3963 * and rounding for per-pixel values 00 and 0xff
3964 */
3965 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
3966
3967 /*
3968 * W/A for underruns with linear/X-tiled with
3969 * WM1+ disabled.
3970 */
3971 tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
3972
3973 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
3974 }
3975
3976 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
3977 const struct intel_crtc_state *new_crtc_state)
3978 {
3979 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
3980 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3981
3982 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3983 crtc->base.mode = new_crtc_state->base.mode;
3984
3985 /*
3986 * Update pipe size and adjust fitter if needed: the reason for this is
3987 * that in compute_mode_changes we check the native mode (not the pfit
3988 * mode) to see if we can flip rather than do a full mode set. In the
3989 * fastboot case, we'll flip, but if we don't update the pipesrc and
3990 * pfit state, we'll end up with a big fb scanned out into the wrong
3991 * sized surface.
3992 */
3993
3994 I915_WRITE(PIPESRC(crtc->pipe),
3995 ((new_crtc_state->pipe_src_w - 1) << 16) |
3996 (new_crtc_state->pipe_src_h - 1));
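/* e.g. a 1920x1080 source yields ((1919 << 16) | 1079) == 0x077f0437 */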
3997
3998 /* on skylake this is done by detaching scalers */
3999 if (INTEL_GEN(dev_priv) >= 9) {
4000 skl_detach_scalers(new_crtc_state);
4001
4002 if (new_crtc_state->pch_pfit.enabled)
4003 skylake_pfit_enable(new_crtc_state);
4004 } else if (HAS_PCH_SPLIT(dev_priv)) {
4005 if (new_crtc_state->pch_pfit.enabled)
4006 ironlake_pfit_enable(new_crtc_state);
4007 else if (old_crtc_state->pch_pfit.enabled)
4008 ironlake_pfit_disable(old_crtc_state);
4009 }
4010
4011 /*
4012 * We don't (yet) allow userspace to control the pipe background color,
4013 * so force it to black, but apply pipe gamma and CSC so that its
4014 * handling will match how we program our planes.
4015 */
4016 if (INTEL_GEN(dev_priv) >= 9)
4017 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
4018 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
4019 SKL_BOTTOM_COLOR_CSC_ENABLE);
4020
4021 if (INTEL_GEN(dev_priv) >= 11)
4022 icl_set_pipe_chicken(crtc);
4023 }
4024
4025 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4026 {
4027 struct drm_device *dev = crtc->base.dev;
4028 struct drm_i915_private *dev_priv = to_i915(dev);
4029 int pipe = crtc->pipe;
4030 i915_reg_t reg;
4031 u32 temp;
4032
4033 /* enable normal train */
4034 reg = FDI_TX_CTL(pipe);
4035 temp = I915_READ(reg);
4036 if (IS_IVYBRIDGE(dev_priv)) {
4037 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4038 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4039 } else {
4040 temp &= ~FDI_LINK_TRAIN_NONE;
4041 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4042 }
4043 I915_WRITE(reg, temp);
4044
4045 reg = FDI_RX_CTL(pipe);
4046 temp = I915_READ(reg);
4047 if (HAS_PCH_CPT(dev_priv)) {
4048 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4049 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4050 } else {
4051 temp &= ~FDI_LINK_TRAIN_NONE;
4052 temp |= FDI_LINK_TRAIN_NONE;
4053 }
4054 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4055
4056 /* wait one idle pattern time */
4057 POSTING_READ(reg);
4058 udelay(1000);
4059
4060 /* IVB wants error correction enabled */
4061 if (IS_IVYBRIDGE(dev_priv))
4062 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4063 FDI_FE_ERRC_ENABLE);
4064 }
4065
4066 /* The FDI link training functions for ILK/Ibexpeak. */
4067 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4068 const struct intel_crtc_state *crtc_state)
4069 {
4070 struct drm_device *dev = crtc->base.dev;
4071 struct drm_i915_private *dev_priv = to_i915(dev);
4072 int pipe = crtc->pipe;
4073 i915_reg_t reg;
4074 u32 temp, tries;
4075
4076 /* FDI needs bits from pipe first */
4077 assert_pipe_enabled(dev_priv, pipe);
4078
4079 /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
4080 for the train result */
4081 reg = FDI_RX_IMR(pipe);
4082 temp = I915_READ(reg);
4083 temp &= ~FDI_RX_SYMBOL_LOCK;
4084 temp &= ~FDI_RX_BIT_LOCK;
4085 I915_WRITE(reg, temp);
4086 I915_READ(reg);
4087 udelay(150);
4088
4089 /* enable CPU FDI TX and PCH FDI RX */
4090 reg = FDI_TX_CTL(pipe);
4091 temp = I915_READ(reg);
4092 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4093 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4094 temp &= ~FDI_LINK_TRAIN_NONE;
4095 temp |= FDI_LINK_TRAIN_PATTERN_1;
4096 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4097
4098 reg = FDI_RX_CTL(pipe);
4099 temp = I915_READ(reg);
4100 temp &= ~FDI_LINK_TRAIN_NONE;
4101 temp |= FDI_LINK_TRAIN_PATTERN_1;
4102 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4103
4104 POSTING_READ(reg);
4105 udelay(150);
4106
4107 /* Ironlake workaround: enable clock pointer after FDI enable */
4108 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4109 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4110 FDI_RX_PHASE_SYNC_POINTER_EN);
4111
4112 reg = FDI_RX_IIR(pipe);
4113 for (tries = 0; tries < 5; tries++) {
4114 temp = I915_READ(reg);
4115 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4116
4117 if ((temp & FDI_RX_BIT_LOCK)) {
4118 DRM_DEBUG_KMS("FDI train 1 done.\n");
4119 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4120 break;
4121 }
4122 }
4123 if (tries == 5)
4124 DRM_ERROR("FDI train 1 fail!\n");
4125
4126 /* Train 2 */
4127 reg = FDI_TX_CTL(pipe);
4128 temp = I915_READ(reg);
4129 temp &= ~FDI_LINK_TRAIN_NONE;
4130 temp |= FDI_LINK_TRAIN_PATTERN_2;
4131 I915_WRITE(reg, temp);
4132
4133 reg = FDI_RX_CTL(pipe);
4134 temp = I915_READ(reg);
4135 temp &= ~FDI_LINK_TRAIN_NONE;
4136 temp |= FDI_LINK_TRAIN_PATTERN_2;
4137 I915_WRITE(reg, temp);
4138
4139 POSTING_READ(reg);
4140 udelay(150);
4141
4142 reg = FDI_RX_IIR(pipe);
4143 for (tries = 0; tries < 5; tries++) {
4144 temp = I915_READ(reg);
4145 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4146
4147 if (temp & FDI_RX_SYMBOL_LOCK) {
4148 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4149 DRM_DEBUG_KMS("FDI train 2 done.\n");
4150 break;
4151 }
4152 }
4153 if (tries == 5)
4154 DRM_ERROR("FDI train 2 fail!\n");
4155
4156 DRM_DEBUG_KMS("FDI train done\n");
4157
4158 }
4159
4160 static const int snb_b_fdi_train_param[] = {
4161 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4162 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4163 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4164 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4165 };
4166
4167 /* The FDI link training functions for SNB/Cougarpoint. */
4168 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4169 const struct intel_crtc_state *crtc_state)
4170 {
4171 struct drm_device *dev = crtc->base.dev;
4172 struct drm_i915_private *dev_priv = to_i915(dev);
4173 int pipe = crtc->pipe;
4174 i915_reg_t reg;
4175 u32 temp, i, retry;
4176
4177 /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
4178 for the train result */
4179 reg = FDI_RX_IMR(pipe);
4180 temp = I915_READ(reg);
4181 temp &= ~FDI_RX_SYMBOL_LOCK;
4182 temp &= ~FDI_RX_BIT_LOCK;
4183 I915_WRITE(reg, temp);
4184
4185 POSTING_READ(reg);
4186 udelay(150);
4187
4188 /* enable CPU FDI TX and PCH FDI RX */
4189 reg = FDI_TX_CTL(pipe);
4190 temp = I915_READ(reg);
4191 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4192 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4193 temp &= ~FDI_LINK_TRAIN_NONE;
4194 temp |= FDI_LINK_TRAIN_PATTERN_1;
4195 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4196 /* SNB-B */
4197 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4198 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4199
4200 I915_WRITE(FDI_RX_MISC(pipe),
4201 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4202
4203 reg = FDI_RX_CTL(pipe);
4204 temp = I915_READ(reg);
4205 if (HAS_PCH_CPT(dev_priv)) {
4206 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4207 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4208 } else {
4209 temp &= ~FDI_LINK_TRAIN_NONE;
4210 temp |= FDI_LINK_TRAIN_PATTERN_1;
4211 }
4212 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4213
4214 POSTING_READ(reg);
4215 udelay(150);
4216
4217 for (i = 0; i < 4; i++) {
4218 reg = FDI_TX_CTL(pipe);
4219 temp = I915_READ(reg);
4220 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4221 temp |= snb_b_fdi_train_param[i];
4222 I915_WRITE(reg, temp);
4223
4224 POSTING_READ(reg);
4225 udelay(500);
4226
4227 for (retry = 0; retry < 5; retry++) {
4228 reg = FDI_RX_IIR(pipe);
4229 temp = I915_READ(reg);
4230 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4231 if (temp & FDI_RX_BIT_LOCK) {
4232 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4233 DRM_DEBUG_KMS("FDI train 1 done.\n");
4234 break;
4235 }
4236 udelay(50);
4237 }
4238 if (retry < 5)
4239 break;
4240 }
4241 if (i == 4)
4242 DRM_ERROR("FDI train 1 fail!\n");
4243
4244 /* Train 2 */
4245 reg = FDI_TX_CTL(pipe);
4246 temp = I915_READ(reg);
4247 temp &= ~FDI_LINK_TRAIN_NONE;
4248 temp |= FDI_LINK_TRAIN_PATTERN_2;
4249 if (IS_GEN(dev_priv, 6)) {
4250 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4251 /* SNB-B */
4252 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4253 }
4254 I915_WRITE(reg, temp);
4255
4256 reg = FDI_RX_CTL(pipe);
4257 temp = I915_READ(reg);
4258 if (HAS_PCH_CPT(dev_priv)) {
4259 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4260 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4261 } else {
4262 temp &= ~FDI_LINK_TRAIN_NONE;
4263 temp |= FDI_LINK_TRAIN_PATTERN_2;
4264 }
4265 I915_WRITE(reg, temp);
4266
4267 POSTING_READ(reg);
4268 udelay(150);
4269
4270 for (i = 0; i < 4; i++) {
4271 reg = FDI_TX_CTL(pipe);
4272 temp = I915_READ(reg);
4273 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4274 temp |= snb_b_fdi_train_param[i];
4275 I915_WRITE(reg, temp);
4276
4277 POSTING_READ(reg);
4278 udelay(500);
4279
4280 for (retry = 0; retry < 5; retry++) {
4281 reg = FDI_RX_IIR(pipe);
4282 temp = I915_READ(reg);
4283 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4284 if (temp & FDI_RX_SYMBOL_LOCK) {
4285 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4286 DRM_DEBUG_KMS("FDI train 2 done.\n");
4287 break;
4288 }
4289 udelay(50);
4290 }
4291 if (retry < 5)
4292 break;
4293 }
4294 if (i == 4)
4295 DRM_ERROR("FDI train 2 fail!\n");
4296
4297 DRM_DEBUG_KMS("FDI train done.\n");
4298 }
4299
4300 /* Manual link training for Ivy Bridge A0 parts */
4301 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4302 const struct intel_crtc_state *crtc_state)
4303 {
4304 struct drm_device *dev = crtc->base.dev;
4305 struct drm_i915_private *dev_priv = to_i915(dev);
4306 int pipe = crtc->pipe;
4307 i915_reg_t reg;
4308 u32 temp, i, j;
4309
4310 /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
4311 for the train result */
4312 reg = FDI_RX_IMR(pipe);
4313 temp = I915_READ(reg);
4314 temp &= ~FDI_RX_SYMBOL_LOCK;
4315 temp &= ~FDI_RX_BIT_LOCK;
4316 I915_WRITE(reg, temp);
4317
4318 POSTING_READ(reg);
4319 udelay(150);
4320
4321 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4322 I915_READ(FDI_RX_IIR(pipe)));
4323
4324 /* Try each vswing and preemphasis setting twice before moving on */
4325 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4326 /* disable first in case we need to retry */
4327 reg = FDI_TX_CTL(pipe);
4328 temp = I915_READ(reg);
4329 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4330 temp &= ~FDI_TX_ENABLE;
4331 I915_WRITE(reg, temp);
4332
4333 reg = FDI_RX_CTL(pipe);
4334 temp = I915_READ(reg);
4335 temp &= ~FDI_LINK_TRAIN_AUTO;
4336 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4337 temp &= ~FDI_RX_ENABLE;
4338 I915_WRITE(reg, temp);
4339
4340 /* enable CPU FDI TX and PCH FDI RX */
4341 reg = FDI_TX_CTL(pipe);
4342 temp = I915_READ(reg);
4343 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4344 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4345 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4346 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4347 temp |= snb_b_fdi_train_param[j/2];
4348 temp |= FDI_COMPOSITE_SYNC;
4349 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4350
4351 I915_WRITE(FDI_RX_MISC(pipe),
4352 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4353
4354 reg = FDI_RX_CTL(pipe);
4355 temp = I915_READ(reg);
4356 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4357 temp |= FDI_COMPOSITE_SYNC;
4358 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4359
4360 POSTING_READ(reg);
4361 udelay(1); /* should be 0.5us */
4362
4363 for (i = 0; i < 4; i++) {
4364 reg = FDI_RX_IIR(pipe);
4365 temp = I915_READ(reg);
4366 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4367
4368 if (temp & FDI_RX_BIT_LOCK ||
4369 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4370 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4371 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4372 i);
4373 break;
4374 }
4375 udelay(1); /* should be 0.5us */
4376 }
4377 if (i == 4) {
4378 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4379 continue;
4380 }
4381
4382 /* Train 2 */
4383 reg = FDI_TX_CTL(pipe);
4384 temp = I915_READ(reg);
4385 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4386 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4387 I915_WRITE(reg, temp);
4388
4389 reg = FDI_RX_CTL(pipe);
4390 temp = I915_READ(reg);
4391 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4392 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4393 I915_WRITE(reg, temp);
4394
4395 POSTING_READ(reg);
4396 udelay(2); /* should be 1.5us */
4397
4398 for (i = 0; i < 4; i++) {
4399 reg = FDI_RX_IIR(pipe);
4400 temp = I915_READ(reg);
4401 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4402
4403 if (temp & FDI_RX_SYMBOL_LOCK ||
4404 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4405 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4406 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4407 i);
4408 goto train_done;
4409 }
4410 udelay(2); /* should be 1.5us */
4411 }
4412 if (i == 4)
4413 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4414 }
4415
4416 train_done:
4417 DRM_DEBUG_KMS("FDI train done.\n");
4418 }
4419
4420 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4421 {
4422 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4423 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4424 int pipe = intel_crtc->pipe;
4425 i915_reg_t reg;
4426 u32 temp;
4427
4428 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4429 reg = FDI_RX_CTL(pipe);
4430 temp = I915_READ(reg);
4431 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4432 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4433 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4434 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4435
4436 POSTING_READ(reg);
4437 udelay(200);
4438
4439 /* Switch from Rawclk to PCDclk */
4440 temp = I915_READ(reg);
4441 I915_WRITE(reg, temp | FDI_PCDCLK);
4442
4443 POSTING_READ(reg);
4444 udelay(200);
4445
4446 /* Enable CPU FDI TX PLL, always on for Ironlake */
4447 reg = FDI_TX_CTL(pipe);
4448 temp = I915_READ(reg);
4449 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4450 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4451
4452 POSTING_READ(reg);
4453 udelay(100);
4454 }
4455 }
4456
4457 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4458 {
4459 struct drm_device *dev = intel_crtc->base.dev;
4460 struct drm_i915_private *dev_priv = to_i915(dev);
4461 int pipe = intel_crtc->pipe;
4462 i915_reg_t reg;
4463 u32 temp;
4464
4465 /* Switch from PCDclk to Rawclk */
4466 reg = FDI_RX_CTL(pipe);
4467 temp = I915_READ(reg);
4468 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4469
4470 /* Disable CPU FDI TX PLL */
4471 reg = FDI_TX_CTL(pipe);
4472 temp = I915_READ(reg);
4473 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4474
4475 POSTING_READ(reg);
4476 udelay(100);
4477
4478 reg = FDI_RX_CTL(pipe);
4479 temp = I915_READ(reg);
4480 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4481
4482 /* Wait for the clocks to turn off. */
4483 POSTING_READ(reg);
4484 udelay(100);
4485 }
4486
4487 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4488 {
4489 struct drm_device *dev = crtc->dev;
4490 struct drm_i915_private *dev_priv = to_i915(dev);
4491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4492 int pipe = intel_crtc->pipe;
4493 i915_reg_t reg;
4494 u32 temp;
4495
4496 /* disable CPU FDI tx and PCH FDI rx */
4497 reg = FDI_TX_CTL(pipe);
4498 temp = I915_READ(reg);
4499 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4500 POSTING_READ(reg);
4501
4502 reg = FDI_RX_CTL(pipe);
4503 temp = I915_READ(reg);
4504 temp &= ~(0x7 << 16);
4505 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4506 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4507
4508 POSTING_READ(reg);
4509 udelay(100);
4510
4511 /* Ironlake workaround: disable clock pointer after disabling FDI */
4512 if (HAS_PCH_IBX(dev_priv))
4513 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4514
4515 /* still set train pattern 1 */
4516 reg = FDI_TX_CTL(pipe);
4517 temp = I915_READ(reg);
4518 temp &= ~FDI_LINK_TRAIN_NONE;
4519 temp |= FDI_LINK_TRAIN_PATTERN_1;
4520 I915_WRITE(reg, temp);
4521
4522 reg = FDI_RX_CTL(pipe);
4523 temp = I915_READ(reg);
4524 if (HAS_PCH_CPT(dev_priv)) {
4525 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4526 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4527 } else {
4528 temp &= ~FDI_LINK_TRAIN_NONE;
4529 temp |= FDI_LINK_TRAIN_PATTERN_1;
4530 }
4531 /* BPC in FDI rx is consistent with that in PIPECONF */
4532 temp &= ~(0x07 << 16);
4533 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4534 I915_WRITE(reg, temp);
4535
4536 POSTING_READ(reg);
4537 udelay(100);
4538 }
4539
4540 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4541 {
4542 struct drm_crtc *crtc;
4543 bool cleanup_done;
4544
4545 drm_for_each_crtc(crtc, &dev_priv->drm) {
4546 struct drm_crtc_commit *commit;
4547 spin_lock(&crtc->commit_lock);
4548 commit = list_first_entry_or_null(&crtc->commit_list,
4549 struct drm_crtc_commit, commit_entry);
4550 cleanup_done = commit ?
4551 try_wait_for_completion(&commit->cleanup_done) : true;
4552 spin_unlock(&crtc->commit_lock);
4553
4554 if (cleanup_done)
4555 continue;
4556
4557 drm_crtc_wait_one_vblank(crtc);
4558
4559 return true;
4560 }
4561
4562 return false;
4563 }
4564
4565 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4566 {
4567 u32 temp;
4568
4569 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4570
4571 mutex_lock(&dev_priv->sb_lock);
4572
4573 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4574 temp |= SBI_SSCCTL_DISABLE;
4575 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4576
4577 mutex_unlock(&dev_priv->sb_lock);
4578 }
4579
4580 /* Program iCLKIP clock to the desired frequency */
4581 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4582 {
4583 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4584 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4585 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4586 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4587 u32 temp;
4588
4589 lpt_disable_iclkip(dev_priv);
4590
4591 /* The iCLK virtual clock root frequency is in MHz,
4592 * but the adjusted_mode->crtc_clock is in kHz. To get the
4593 * divisors, it is necessary to divide one by the other, so we
4594 * convert the virtual clock frequency to kHz here for higher
4595 * precision.
4596 */
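/*
 * A worked example of the search below (illustrative pixel clock, not
 * taken from any particular mode): for crtc_clock = 108000 (108 MHz)
 * and auxdiv = 0,
 *   desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600
 *   divsel          = 1600 / 64 - 2 = 23
 *   phaseinc        = 1600 % 64     = 0
 * divsel fits in the 7-bit field, so the loop settles on auxdiv = 0.
 */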
4597 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4598 u32 iclk_virtual_root_freq = 172800 * 1000;
4599 u32 iclk_pi_range = 64;
4600 u32 desired_divisor;
4601
4602 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4603 clock << auxdiv);
4604 divsel = (desired_divisor / iclk_pi_range) - 2;
4605 phaseinc = desired_divisor % iclk_pi_range;
4606
4607 /*
4608 * Near 20MHz is a corner case which is
4609 * out of range for the 7-bit divisor
4610 */
4611 if (divsel <= 0x7f)
4612 break;
4613 }
4614
4615 /* This should not happen with any sane values */
4616 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4617 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4618 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4619 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4620
4621 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4622 clock,
4623 auxdiv,
4624 divsel,
4625 phasedir,
4626 phaseinc);
4627
4628 mutex_lock(&dev_priv->sb_lock);
4629
4630 /* Program SSCDIVINTPHASE6 */
4631 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4632 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4633 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4634 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4635 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4636 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4637 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4638 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4639
4640 /* Program SSCAUXDIV */
4641 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4642 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4643 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4644 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4645
4646 /* Enable modulator and associated divider */
4647 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4648 temp &= ~SBI_SSCCTL_DISABLE;
4649 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4650
4651 mutex_unlock(&dev_priv->sb_lock);
4652
4653 /* Wait for initialization time */
4654 udelay(24);
4655
4656 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4657 }
4658
4659 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4660 {
4661 u32 divsel, phaseinc, auxdiv;
4662 u32 iclk_virtual_root_freq = 172800 * 1000;
4663 u32 iclk_pi_range = 64;
4664 u32 desired_divisor;
4665 u32 temp;
4666
4667 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
4668 return 0;
4669
4670 mutex_lock(&dev_priv->sb_lock);
4671
4672 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4673 if (temp & SBI_SSCCTL_DISABLE) {
4674 mutex_unlock(&dev_priv->sb_lock);
4675 return 0;
4676 }
4677
4678 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4679 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
4680 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
4681 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
4682 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
4683
4684 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4685 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
4686 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
4687
4688 mutex_unlock(&dev_priv->sb_lock);
4689
4690 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
4691
4692 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4693 desired_divisor << auxdiv);
4694 }
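
/*
 * This simply inverts the computation in lpt_program_iclkip(): for the
 * illustrative 108 MHz example above, divsel = 23, phaseinc = 0 and
 * auxdiv = 0 give desired_divisor = (23 + 2) * 64 + 0 = 1600, and
 * DIV_ROUND_CLOSEST(172800000, 1600 << 0) recovers crtc_clock = 108000.
 */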
4695
4696 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
4697 enum pipe pch_transcoder)
4698 {
4699 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4700 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4701 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4702
4703 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4704 I915_READ(HTOTAL(cpu_transcoder)));
4705 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4706 I915_READ(HBLANK(cpu_transcoder)));
4707 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4708 I915_READ(HSYNC(cpu_transcoder)));
4709
4710 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4711 I915_READ(VTOTAL(cpu_transcoder)));
4712 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4713 I915_READ(VBLANK(cpu_transcoder)));
4714 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4715 I915_READ(VSYNC(cpu_transcoder)));
4716 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4717 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4718 }
4719
4720 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
4721 {
4722 u32 temp;
4723
4724 temp = I915_READ(SOUTH_CHICKEN1);
4725 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4726 return;
4727
4728 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4729 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4730
4731 temp &= ~FDI_BC_BIFURCATION_SELECT;
4732 if (enable)
4733 temp |= FDI_BC_BIFURCATION_SELECT;
4734
4735 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4736 I915_WRITE(SOUTH_CHICKEN1, temp);
4737 POSTING_READ(SOUTH_CHICKEN1);
4738 }
4739
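/*
 * A sketch of the constraint enforced below, as we understand it: on
 * CPT the FDI B and C links share lanes. Pipe B may drive up to four
 * lanes, but the upper two are the same physical lanes FDI C would
 * use, so bifurcation must be off when pipe B needs more than two
 * lanes, and must be on whenever pipe C (limited to two lanes) is
 * driven.
 */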
4740 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4741 {
4742 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4743 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4744
4745 switch (crtc->pipe) {
4746 case PIPE_A:
4747 break;
4748 case PIPE_B:
4749 if (crtc_state->fdi_lanes > 2)
4750 cpt_set_fdi_bc_bifurcation(dev_priv, false);
4751 else
4752 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4753
4754 break;
4755 case PIPE_C:
4756 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4757
4758 break;
4759 default:
4760 BUG();
4761 }
4762 }
4763
4764 /*
4765 * Finds the encoder associated with the given CRTC. This can only be
4766 * used when we know that the CRTC isn't feeding multiple encoders!
4767 */
4768 static struct intel_encoder *
4769 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4770 const struct intel_crtc_state *crtc_state)
4771 {
4772 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4773 const struct drm_connector_state *connector_state;
4774 const struct drm_connector *connector;
4775 struct intel_encoder *encoder = NULL;
4776 int num_encoders = 0;
4777 int i;
4778
4779 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4780 if (connector_state->crtc != &crtc->base)
4781 continue;
4782
4783 encoder = to_intel_encoder(connector_state->best_encoder);
4784 num_encoders++;
4785 }
4786
4787 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4788 num_encoders, pipe_name(crtc->pipe));
4789
4790 return encoder;
4791 }
4792
4793 /*
4794 * Enable PCH resources required for PCH ports:
4795 * - PCH PLLs
4796 * - FDI training & RX/TX
4797 * - update transcoder timings
4798 * - DP transcoding bits
4799 * - transcoder
4800 */
4801 static void ironlake_pch_enable(const struct intel_atomic_state *state,
4802 const struct intel_crtc_state *crtc_state)
4803 {
4804 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4805 struct drm_device *dev = crtc->base.dev;
4806 struct drm_i915_private *dev_priv = to_i915(dev);
4807 int pipe = crtc->pipe;
4808 u32 temp;
4809
4810 assert_pch_transcoder_disabled(dev_priv, pipe);
4811
4812 if (IS_IVYBRIDGE(dev_priv))
4813 ivybridge_update_fdi_bc_bifurcation(crtc_state);
4814
4815 /* Write the TU size bits before fdi link training, so that error
4816 * detection works. */
4817 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4818 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4819
4820 /* For PCH output, train the FDI link */
4821 dev_priv->display.fdi_link_train(crtc, crtc_state);
4822
4823 /* We need to program the right clock selection before writing the pixel
4824 * multiplier into the DPLL. */
4825 if (HAS_PCH_CPT(dev_priv)) {
4826 u32 sel;
4827
4828 temp = I915_READ(PCH_DPLL_SEL);
4829 temp |= TRANS_DPLL_ENABLE(pipe);
4830 sel = TRANS_DPLLB_SEL(pipe);
4831 if (crtc_state->shared_dpll ==
4832 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4833 temp |= sel;
4834 else
4835 temp &= ~sel;
4836 I915_WRITE(PCH_DPLL_SEL, temp);
4837 }
4838
4839 /* XXX: PCH PLLs can be enabled any time before we enable the PCH
4840 * transcoder, and we actually should do this to not upset any PCH
4841 * transcoder that already uses the clock when we share it.
4842 *
4843 * Note that enable_shared_dpll tries to do the right thing, but
4844 * get_shared_dpll unconditionally resets the pll - we need that to have
4845 * the right LVDS enable sequence. */
4846 intel_enable_shared_dpll(crtc_state);
4847
4848 /* set transcoder timing, panel must allow it */
4849 assert_panel_unlocked(dev_priv, pipe);
4850 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
4851
4852 intel_fdi_normal_train(crtc);
4853
4854 /* For PCH DP, enable TRANS_DP_CTL */
4855 if (HAS_PCH_CPT(dev_priv) &&
4856 intel_crtc_has_dp_encoder(crtc_state)) {
4857 const struct drm_display_mode *adjusted_mode =
4858 &crtc_state->base.adjusted_mode;
4859 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4860 i915_reg_t reg = TRANS_DP_CTL(pipe);
4861 enum port port;
4862
4863 temp = I915_READ(reg);
4864 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4865 TRANS_DP_SYNC_MASK |
4866 TRANS_DP_BPC_MASK);
4867 temp |= TRANS_DP_OUTPUT_ENABLE;
4868 temp |= bpc << 9; /* same format but at 11:9 */
4869
4870 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4871 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4872 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4873 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4874
4875 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
4876 WARN_ON(port < PORT_B || port > PORT_D);
4877 temp |= TRANS_DP_PORT_SEL(port);
4878
4879 I915_WRITE(reg, temp);
4880 }
4881
4882 ironlake_enable_pch_transcoder(crtc_state);
4883 }
4884
4885 static void lpt_pch_enable(const struct intel_atomic_state *state,
4886 const struct intel_crtc_state *crtc_state)
4887 {
4888 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4889 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4890 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4891
4892 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
4893
4894 lpt_program_iclkip(crtc_state);
4895
4896 /* Set transcoder timing. */
4897 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
4898
4899 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4900 }
4901
4902 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4903 {
4904 struct drm_i915_private *dev_priv = to_i915(dev);
4905 i915_reg_t dslreg = PIPEDSL(pipe);
4906 u32 temp;
4907
4908 temp = I915_READ(dslreg);
4909 udelay(500);
4910 if (wait_for(I915_READ(dslreg) != temp, 5)) {
4911 if (wait_for(I915_READ(dslreg) != temp, 5))
4912 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4913 }
4914 }
4915
4916 /*
4917 * The hardware phase 0.0 refers to the center of the pixel.
4918 * We want to start from the top/left edge which is phase
4919 * -0.5. That matches how the hardware calculates the scaling
4920 * factors (from top-left of the first pixel to bottom-right
4921 * of the last pixel, as opposed to the pixel centers).
4922 *
4923 * For 4:2:0 subsampled chroma planes we obviously have to
4924 * adjust that so that the chroma sample position lands in
4925 * the right spot.
4926 *
4927 * Note that for packed YCbCr 4:2:2 formats there is no way to
4928 * control chroma siting. The hardware simply replicates the
4929 * chroma samples for both of the luma samples, and thus we don't
4930 * actually get the expected MPEG2 chroma siting convention :(
4931 * The same behaviour is observed on pre-SKL platforms as well.
4932 *
4933 * Theory behind the formula (note that we ignore sub-pixel
4934 * source coordinates):
4935 * s = source sample position
4936 * d = destination sample position
4937 *
4938 * Downscaling 4:1:
4939 * -0.5
4940 * | 0.0
4941 * | | 1.5 (initial phase)
4942 * | | |
4943 * v v v
4944 * | s | s | s | s |
4945 * | d |
4946 *
4947 * Upscaling 1:4:
4948 * -0.5
4949 * | -0.375 (initial phase)
4950 * | | 0.0
4951 * | | |
4952 * v v v
4953 * | s |
4954 * | d | d | d | d |
4955 */
4956 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4957 {
4958 int phase = -0x8000;
4959 u16 trip = 0;
4960
4961 if (chroma_cosited)
4962 phase += (sub - 1) * 0x8000 / sub;
4963
4964 phase += scale / (2 * sub);
4965
4966 /*
4967 * Hardware initial phase limited to [-0.5:1.5].
4968 * Since the max hardware scale factor is 3.0, we
4969 * should never actually exceed 1.0 here.
4970 */
4971 WARN_ON(phase < -0x8000 || phase > 0x18000);
4972
4973 if (phase < 0)
4974 phase = 0x10000 + phase;
4975 else
4976 trip = PS_PHASE_TRIP;
4977
4978 return ((phase >> 2) & PS_PHASE_MASK) | trip;
4979 }
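
/*
 * Sanity check of the formula against the diagrams above (values in
 * the .16 binary fixed point used here, 0x10000 == 1.0):
 *
 * 4:1 downscale, luma (sub = 1, scale = 0x40000):
 *   phase = -0x8000 + 0x40000 / 2 = 0x18000, i.e. +1.5, trip set
 * 1:4 upscale, luma (sub = 1, scale = 0x4000):
 *   phase = -0x8000 + 0x4000 / 2 = -0x6000, i.e. -0.375,
 *   encoded as 0x10000 - 0x6000 = 0xa000 with trip clear
 * matching the initial phases shown in the two diagrams.
 */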
4980
4981 static int
4982 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4983 unsigned int scaler_user, int *scaler_id,
4984 int src_w, int src_h, int dst_w, int dst_h,
4985 const struct drm_format_info *format, bool need_scaler)
4986 {
4987 struct intel_crtc_scaler_state *scaler_state =
4988 &crtc_state->scaler_state;
4989 struct intel_crtc *intel_crtc =
4990 to_intel_crtc(crtc_state->base.crtc);
4991 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4992 const struct drm_display_mode *adjusted_mode =
4993 &crtc_state->base.adjusted_mode;
4994
4995 /*
4996 * Src coordinates are already rotated by 270 degrees for
4997 * the 90/270 degree plane rotation cases (to match the
4998 * GTT mapping), hence no need to account for rotation here.
4999 */
5000 if (src_w != dst_w || src_h != dst_h)
5001 need_scaler = true;
5002
5003 /*
5004 * Scaling/fitting is not supported in IF-ID mode on GEN9+.
5005 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5006 * Once NV12 is enabled, handle it here while allocating scaler
5007 * for NV12.
5008 */
5009 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5010 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5011 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5012 return -EINVAL;
5013 }
5014
5015 /*
5016 * If the plane is being disabled, the scaler is no longer required,
5017 * or a force detach is requested, free the scaler bound to this
5018 * plane/crtc by updating crtc_state->scaler_state.scaler_users.
5019 *
5020 * Here the scaler state in crtc_state is marked free so that the
5021 * scaler can be assigned to another user. The actual register
5022 * update to free the scaler is done in plane/panel-fit programming.
5023 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5024 */
5025 if (force_detach || !need_scaler) {
5026 if (*scaler_id >= 0) {
5027 scaler_state->scaler_users &= ~(1 << scaler_user);
5028 scaler_state->scalers[*scaler_id].in_use = 0;
5029
5030 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5031 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5032 intel_crtc->pipe, scaler_user, *scaler_id,
5033 scaler_state->scaler_users);
5034 *scaler_id = -1;
5035 }
5036 return 0;
5037 }
5038
5039 if (format && format->format == DRM_FORMAT_NV12 &&
5040 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5041 DRM_DEBUG_KMS("NV12: src dimensions not met\n");
5042 return -EINVAL;
5043 }
5044
5045 /* range checks */
5046 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5047 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5048 (IS_GEN(dev_priv, 11) &&
5049 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5050 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5051 (!IS_GEN(dev_priv, 11) &&
5052 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5053 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5054 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5055 "size is out of scaler range\n",
5056 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5057 return -EINVAL;
5058 }
5059
5060 /* mark this plane as a scaler user in crtc_state */
5061 scaler_state->scaler_users |= (1 << scaler_user);
5062 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5063 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5064 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5065 scaler_state->scaler_users);
5066
5067 return 0;
5068 }
5069
5070 /**
5071 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5072 *
5073 * @state: crtc state
5074 *
5075 * Return:
5076 * 0 - scaler_users updated successfully
5077 * error - requested scaling cannot be supported or other error condition
5078 */
5079 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5080 {
5081 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5082 bool need_scaler = false;
5083
5084 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5085 need_scaler = true;
5086
5087 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5088 &state->scaler_state.scaler_id,
5089 state->pipe_src_w, state->pipe_src_h,
5090 adjusted_mode->crtc_hdisplay,
5091 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5092 }
5093
5094 /**
5095 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5096 * @crtc_state: crtc state
5097 * @plane_state: atomic plane state to update
5098 *
5099 * Return:
5100 * 0 - scaler_users updated successfully
5101 * error - requested scaling cannot be supported or other error condition
5102 */
5103 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5104 struct intel_plane_state *plane_state)
5105 {
5106 struct intel_plane *intel_plane =
5107 to_intel_plane(plane_state->base.plane);
5108 struct drm_framebuffer *fb = plane_state->base.fb;
5109 int ret;
5110 bool force_detach = !fb || !plane_state->base.visible;
5111 bool need_scaler = false;
5112
5113 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5114 if (!icl_is_hdr_plane(intel_plane) &&
5115 fb && fb->format->format == DRM_FORMAT_NV12)
5116 need_scaler = true;
5117
5118 ret = skl_update_scaler(crtc_state, force_detach,
5119 drm_plane_index(&intel_plane->base),
5120 &plane_state->scaler_id,
5121 drm_rect_width(&plane_state->base.src) >> 16,
5122 drm_rect_height(&plane_state->base.src) >> 16,
5123 drm_rect_width(&plane_state->base.dst),
5124 drm_rect_height(&plane_state->base.dst),
5125 fb ? fb->format : NULL, need_scaler);
5126
5127 if (ret || plane_state->scaler_id < 0)
5128 return ret;
5129
5130 /* check colorkey */
5131 if (plane_state->ckey.flags) {
5132 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5133 intel_plane->base.base.id,
5134 intel_plane->base.name);
5135 return -EINVAL;
5136 }
5137
5138 /* Check src format */
5139 switch (fb->format->format) {
5140 case DRM_FORMAT_RGB565:
5141 case DRM_FORMAT_XBGR8888:
5142 case DRM_FORMAT_XRGB8888:
5143 case DRM_FORMAT_ABGR8888:
5144 case DRM_FORMAT_ARGB8888:
5145 case DRM_FORMAT_XRGB2101010:
5146 case DRM_FORMAT_XBGR2101010:
5147 case DRM_FORMAT_YUYV:
5148 case DRM_FORMAT_YVYU:
5149 case DRM_FORMAT_UYVY:
5150 case DRM_FORMAT_VYUY:
5151 case DRM_FORMAT_NV12:
5152 break;
5153 default:
5154 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5155 intel_plane->base.base.id, intel_plane->base.name,
5156 fb->base.id, fb->format->format);
5157 return -EINVAL;
5158 }
5159
5160 return 0;
5161 }
5162
5163 static void skylake_scaler_disable(struct intel_crtc *crtc)
5164 {
5165 int i;
5166
5167 for (i = 0; i < crtc->num_scalers; i++)
5168 skl_detach_scaler(crtc, i);
5169 }
5170
5171 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5172 {
5173 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5174 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5175 enum pipe pipe = crtc->pipe;
5176 const struct intel_crtc_scaler_state *scaler_state =
5177 &crtc_state->scaler_state;
5178
5179 if (crtc_state->pch_pfit.enabled) {
5180 u16 uv_rgb_hphase, uv_rgb_vphase;
5181 int pfit_w, pfit_h, hscale, vscale;
5182 int id;
5183
5184 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5185 return;
5186
5187 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5188 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5189
5190 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5191 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5192
5193 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5194 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5195
5196 id = scaler_state->scaler_id;
5197 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5198 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5199 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5200 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5201 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5202 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5203 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5204 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5205 }
5206 }
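
/*
 * An illustrative pfit case (made-up sizes): upscaling a 960 pixel
 * wide source to a 1920 pixel wide pfit window gives
 *   hscale = (960 << 16) / 1920 = 0x8000 (0.5)
 * and skl_scaler_calc_phase(1, 0x8000, false) then yields
 *   phase = -0x8000 + 0x8000 / 2 = -0x4000 (-0.25),
 * encoded as 0x10000 - 0x4000 = 0xc000 with the trip bit clear.
 */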
5207
5208 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5209 {
5210 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5211 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5212 int pipe = crtc->pipe;
5213
5214 if (crtc_state->pch_pfit.enabled) {
5215 /* Force use of hard-coded filter coefficients
5216 * as some pre-programmed values are broken,
5217 * e.g. x201.
5218 */
5219 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5220 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5221 PF_PIPE_SEL_IVB(pipe));
5222 else
5223 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5224 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5225 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5226 }
5227 }
5228
5229 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5230 {
5231 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5232 struct drm_device *dev = crtc->base.dev;
5233 struct drm_i915_private *dev_priv = to_i915(dev);
5234
5235 if (!crtc_state->ips_enabled)
5236 return;
5237
5238 /*
5239 * We can only enable IPS after we enable a plane and wait for a vblank.
5240 * This function is called from post_plane_update, which is run after
5241 * a vblank wait.
5242 */
5243 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5244
5245 if (IS_BROADWELL(dev_priv)) {
5246 mutex_lock(&dev_priv->pcu_lock);
5247 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5248 IPS_ENABLE | IPS_PCODE_CONTROL));
5249 mutex_unlock(&dev_priv->pcu_lock);
5250 /* Quoting Art Runyan: "its not safe to expect any particular
5251 * value in IPS_CTL bit 31 after enabling IPS through the
5252 * mailbox." Moreover, the mailbox may return a bogus state,
5253 * so we need to just enable it and continue on.
5254 */
5255 } else {
5256 I915_WRITE(IPS_CTL, IPS_ENABLE);
5257 /* The bit only becomes 1 in the next vblank, so this wait here
5258 * is essentially intel_wait_for_vblank. If we don't have this
5259 * and don't wait for vblanks until the end of crtc_enable, then
5260 * the HW state readout code will complain that the expected
5261 * IPS_CTL value is not the one we read. */
5262 if (intel_wait_for_register(dev_priv,
5263 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5264 50))
5265 DRM_ERROR("Timed out waiting for IPS enable\n");
5266 }
5267 }
5268
5269 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5270 {
5271 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5272 struct drm_device *dev = crtc->base.dev;
5273 struct drm_i915_private *dev_priv = to_i915(dev);
5274
5275 if (!crtc_state->ips_enabled)
5276 return;
5277
5278 if (IS_BROADWELL(dev_priv)) {
5279 mutex_lock(&dev_priv->pcu_lock);
5280 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5281 mutex_unlock(&dev_priv->pcu_lock);
5282 /*
5283 * Wait for PCODE to finish disabling IPS. The BSpec-specified
5284 * 42ms timeout value leads to occasional timeouts so use 100ms
5285 * instead.
5286 */
5287 if (intel_wait_for_register(dev_priv,
5288 IPS_CTL, IPS_ENABLE, 0,
5289 100))
5290 DRM_ERROR("Timed out waiting for IPS disable\n");
5291 } else {
5292 I915_WRITE(IPS_CTL, 0);
5293 POSTING_READ(IPS_CTL);
5294 }
5295
5296 /* We need to wait for a vblank before we can disable the plane. */
5297 intel_wait_for_vblank(dev_priv, crtc->pipe);
5298 }
5299
5300 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5301 {
5302 if (intel_crtc->overlay) {
5303 struct drm_device *dev = intel_crtc->base.dev;
5304
5305 mutex_lock(&dev->struct_mutex);
5306 (void) intel_overlay_switch_off(intel_crtc->overlay);
5307 mutex_unlock(&dev->struct_mutex);
5308 }
5309
5310 /* Let userspace switch the overlay on again. In most cases userspace
5311 * has to recompute where to put it anyway.
5312 */
5313 }
5314
5315 /**
5316 * intel_post_enable_primary - Perform operations after enabling primary plane
5317 * @crtc: the CRTC whose primary plane was just enabled
5318 * @new_crtc_state: the enabling state
5319 *
5320 * Performs potentially sleeping operations that must be done after the primary
5321 * plane is enabled, such as updating FBC and IPS. Note that this may be
5322 * called due to an explicit primary plane update, or due to an implicit
5323 * re-enable that is caused when a sprite plane is updated to no longer
5324 * completely hide the primary plane.
5325 */
5326 static void
5327 intel_post_enable_primary(struct drm_crtc *crtc,
5328 const struct intel_crtc_state *new_crtc_state)
5329 {
5330 struct drm_device *dev = crtc->dev;
5331 struct drm_i915_private *dev_priv = to_i915(dev);
5332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5333 int pipe = intel_crtc->pipe;
5334
5335 /*
5336 * Gen2 reports pipe underruns whenever all planes are disabled.
5337 * So don't enable underrun reporting before at least some planes
5338 * are enabled.
5339 * FIXME: Need to fix the logic to work when we turn off all planes
5340 * but leave the pipe running.
5341 */
5342 if (IS_GEN(dev_priv, 2))
5343 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5344
5345 /* Underruns don't always raise interrupts, so check manually. */
5346 intel_check_cpu_fifo_underruns(dev_priv);
5347 intel_check_pch_fifo_underruns(dev_priv);
5348 }
5349
5350 /* FIXME get rid of this and use pre_plane_update */
5351 static void
5352 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5353 {
5354 struct drm_device *dev = crtc->dev;
5355 struct drm_i915_private *dev_priv = to_i915(dev);
5356 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5357 int pipe = intel_crtc->pipe;
5358
5359 /*
5360 * Gen2 reports pipe underruns whenever all planes are disabled.
5361 * So disable underrun reporting before all the planes get disabled.
5362 */
5363 if (IS_GEN(dev_priv, 2))
5364 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5365
5366 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5367
5368 /*
5369 * Vblank time updates from the shadow to live plane control register
5370 * are blocked if the memory self-refresh mode is active at that
5371 * moment. So to make sure the plane gets truly disabled, disable
5372 * first the self-refresh mode. The self-refresh enable bit in turn
5373 * will be checked/applied by the HW only at the next frame start
5374 * event which is after the vblank start event, so we need to have a
5375 * wait-for-vblank between disabling the plane and the pipe.
5376 */
5377 if (HAS_GMCH(dev_priv) &&
5378 intel_set_memory_cxsr(dev_priv, false))
5379 intel_wait_for_vblank(dev_priv, pipe);
5380 }
5381
5382 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5383 const struct intel_crtc_state *new_crtc_state)
5384 {
5385 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5386 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5387
5388 if (!old_crtc_state->ips_enabled)
5389 return false;
5390
5391 if (needs_modeset(&new_crtc_state->base))
5392 return true;
5393
5394 /*
5395 * Workaround: Do not read or write the pipe palette/gamma data while
5396 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5397 *
5398 * Disable IPS before we program the LUT.
5399 */
5400 if (IS_HASWELL(dev_priv) &&
5401 (new_crtc_state->base.color_mgmt_changed ||
5402 new_crtc_state->update_pipe) &&
5403 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5404 return true;
5405
5406 return !new_crtc_state->ips_enabled;
5407 }
5408
5409 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5410 const struct intel_crtc_state *new_crtc_state)
5411 {
5412 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5414
5415 if (!new_crtc_state->ips_enabled)
5416 return false;
5417
5418 if (needs_modeset(&new_crtc_state->base))
5419 return true;
5420
5421 /*
5422 * Workaround: Do not read or write the pipe palette/gamma data while
5423 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5424 *
5425 * Re-enable IPS after the LUT has been programmed.
5426 */
5427 if (IS_HASWELL(dev_priv) &&
5428 (new_crtc_state->base.color_mgmt_changed ||
5429 new_crtc_state->update_pipe) &&
5430 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5431 return true;
5432
5433 /*
5434 * We can't read out IPS on Broadwell, so assume the worst and
5435 * forcibly enable IPS on the first fastset.
5436 */
5437 if (new_crtc_state->update_pipe &&
5438 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5439 return true;
5440
5441 return !old_crtc_state->ips_enabled;
5442 }
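
/*
 * Editor's sketch (illustrative only, not driver code): how the two
 * helpers above pair up during an atomic update. IPS is torn down
 * before touching anything it could glitch on, and brought back only
 * if the new state still allows it:
 *
 *	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
 *		hsw_disable_ips(old_crtc_state);
 *	... program planes, LUT, etc. ...
 *	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
 *		hsw_enable_ips(new_crtc_state);
 *
 * This is the pairing used by intel_pre_plane_update() and
 * intel_post_plane_update() below.
 */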
5443
5444 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5445 const struct intel_crtc_state *crtc_state)
5446 {
5447 if (!crtc_state->nv12_planes)
5448 return false;
5449
5450 /* WA Display #0827: Gen9:all */
5451 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5452 return true;
5453
5454 return false;
5455 }
5456
5457 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5458 {
5459 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5460 struct drm_device *dev = crtc->base.dev;
5461 struct drm_i915_private *dev_priv = to_i915(dev);
5462 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5463 struct intel_crtc_state *pipe_config =
5464 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5465 crtc);
5466 struct drm_plane *primary = crtc->base.primary;
5467 struct drm_plane_state *old_primary_state =
5468 drm_atomic_get_old_plane_state(old_state, primary);
5469
5470 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5471
5472 if (pipe_config->update_wm_post && pipe_config->base.active)
5473 intel_update_watermarks(crtc);
5474
5475 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5476 hsw_enable_ips(pipe_config);
5477
5478 if (old_primary_state) {
5479 struct drm_plane_state *new_primary_state =
5480 drm_atomic_get_new_plane_state(old_state, primary);
5481
5482 intel_fbc_post_update(crtc);
5483
5484 if (new_primary_state->visible &&
5485 (needs_modeset(&pipe_config->base) ||
5486 !old_primary_state->visible))
5487 intel_post_enable_primary(&crtc->base, pipe_config);
5488 }
5489
5490 /* Display WA 827 */
5491 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5492 !needs_nv12_wa(dev_priv, pipe_config)) {
5493 skl_wa_clkgate(dev_priv, crtc->pipe, false);
5494 }
5495 }
5496
5497 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5498 struct intel_crtc_state *pipe_config)
5499 {
5500 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5501 struct drm_device *dev = crtc->base.dev;
5502 struct drm_i915_private *dev_priv = to_i915(dev);
5503 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5504 struct drm_plane *primary = crtc->base.primary;
5505 struct drm_plane_state *old_primary_state =
5506 drm_atomic_get_old_plane_state(old_state, primary);
5507 bool modeset = needs_modeset(&pipe_config->base);
5508 struct intel_atomic_state *old_intel_state =
5509 to_intel_atomic_state(old_state);
5510
5511 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5512 hsw_disable_ips(old_crtc_state);
5513
5514 if (old_primary_state) {
5515 struct intel_plane_state *new_primary_state =
5516 intel_atomic_get_new_plane_state(old_intel_state,
5517 to_intel_plane(primary));
5518
5519 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5520 /*
5521 * Gen2 reports pipe underruns whenever all planes are disabled.
5522 * So disable underrun reporting before all the planes get disabled.
5523 */
5524 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5525 (modeset || !new_primary_state->base.visible))
5526 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5527 }
5528
5529 /* Display WA 827 */
5530 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5531 needs_nv12_wa(dev_priv, pipe_config)) {
5532 skl_wa_clkgate(dev_priv, crtc->pipe, true);
5533 }
5534
5535 /*
5536 * Vblank time updates from the shadow to live plane control register
5537 * are blocked if the memory self-refresh mode is active at that
5538 * moment. So to make sure the plane gets truly disabled, disable
5539 * first the self-refresh mode. The self-refresh enable bit in turn
5540 * will be checked/applied by the HW only at the next frame start
5541 * event which is after the vblank start event, so we need to have a
5542 * wait-for-vblank between disabling the plane and the pipe.
5543 */
5544 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5545 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5546 intel_wait_for_vblank(dev_priv, crtc->pipe);
5547
5548 /*
5549 * IVB workaround: must disable low power watermarks for at least
5550 * one frame before enabling scaling. LP watermarks can be re-enabled
5551 * when scaling is disabled.
5552 *
5553 * WaCxSRDisabledForSpriteScaling:ivb
5554 */
5555 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5556 old_crtc_state->base.active)
5557 intel_wait_for_vblank(dev_priv, crtc->pipe);
5558
5559 /*
5560 * If we're doing a modeset, we're done. No need to do any pre-vblank
5561 * watermark programming here.
5562 */
5563 if (needs_modeset(&pipe_config->base))
5564 return;
5565
5566 /*
5567 * For platforms that support atomic watermarks, program the
5568 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5569 * will be the intermediate values that are safe for both pre- and
5570 * post- vblank; when vblank happens, the 'active' values will be set
5571 * to the final 'target' values and we'll do this again to get the
5572 * optimal watermarks. For gen9+ platforms, the values we program here
5573 * will be the final target values which will get automatically latched
5574 * at vblank time; no further programming will be necessary.
5575 *
5576 * If a platform hasn't been transitioned to atomic watermarks yet,
5577 * we'll continue to update watermarks the old way, if flags tell
5578 * us to.
5579 */
5580 if (dev_priv->display.initial_watermarks != NULL)
5581 dev_priv->display.initial_watermarks(old_intel_state,
5582 pipe_config);
5583 else if (pipe_config->update_wm_pre)
5584 intel_update_watermarks(crtc);
5585 }
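
/*
 * Editor's note (illustrative sketch, not driver code): the two-phase
 * watermark flow described above, for a non-modeset update on a
 * platform with atomic watermarks:
 *
 *	intel_pre_plane_update()
 *	    -> initial_watermarks()	// 'intermediate' values, safe
 *					// both before and after vblank
 *	... plane updates latch at the next vblank ...
 *	intel_post_plane_update()
 *	    -> intel_update_watermarks()	// relax to the optimal
 *						// 'target' values (when
 *						// update_wm_post is set)
 */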
5586
5587 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5588 struct intel_crtc *crtc)
5589 {
5590 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5591 const struct intel_crtc_state *new_crtc_state =
5592 intel_atomic_get_new_crtc_state(state, crtc);
5593 unsigned int update_mask = new_crtc_state->update_planes;
5594 const struct intel_plane_state *old_plane_state;
5595 struct intel_plane *plane;
5596 unsigned int fb_bits = 0;
5597 int i;
5598
5599 intel_crtc_dpms_overlay_disable(crtc);
5600
5601 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5602 if (crtc->pipe != plane->pipe ||
5603 !(update_mask & BIT(plane->id)))
5604 continue;
5605
5606 plane->disable_plane(plane, new_crtc_state);
5607
5608 if (old_plane_state->base.visible)
5609 fb_bits |= plane->frontbuffer_bit;
5610 }
5611
5612 intel_frontbuffer_flip(dev_priv, fb_bits);
5613 }
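
/*
 * Illustrative example (editor's note, not driver code): update_planes
 * is a bitmask keyed by plane id, so a commit that turns off the
 * primary plane and one sprite on this crtc would be processed as:
 *
 *	update_mask = BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0);
 *
 * The loop above then calls ->disable_plane() for exactly those two
 * planes and accumulates their frontbuffer bits for the flip notify.
 */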
5614
5615 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5616 struct intel_crtc_state *crtc_state,
5617 struct drm_atomic_state *old_state)
5618 {
5619 struct drm_connector_state *conn_state;
5620 struct drm_connector *conn;
5621 int i;
5622
5623 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5624 struct intel_encoder *encoder =
5625 to_intel_encoder(conn_state->best_encoder);
5626
5627 if (conn_state->crtc != crtc)
5628 continue;
5629
5630 if (encoder->pre_pll_enable)
5631 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5632 }
5633 }
5634
5635 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5636 struct intel_crtc_state *crtc_state,
5637 struct drm_atomic_state *old_state)
5638 {
5639 struct drm_connector_state *conn_state;
5640 struct drm_connector *conn;
5641 int i;
5642
5643 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5644 struct intel_encoder *encoder =
5645 to_intel_encoder(conn_state->best_encoder);
5646
5647 if (conn_state->crtc != crtc)
5648 continue;
5649
5650 if (encoder->pre_enable)
5651 encoder->pre_enable(encoder, crtc_state, conn_state);
5652 }
5653 }
5654
5655 static void intel_encoders_enable(struct drm_crtc *crtc,
5656 struct intel_crtc_state *crtc_state,
5657 struct drm_atomic_state *old_state)
5658 {
5659 struct drm_connector_state *conn_state;
5660 struct drm_connector *conn;
5661 int i;
5662
5663 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5664 struct intel_encoder *encoder =
5665 to_intel_encoder(conn_state->best_encoder);
5666
5667 if (conn_state->crtc != crtc)
5668 continue;
5669
5670 if (encoder->enable)
5671 encoder->enable(encoder, crtc_state, conn_state);
5672 intel_opregion_notify_encoder(encoder, true);
5673 }
5674 }
5675
5676 static void intel_encoders_disable(struct drm_crtc *crtc,
5677 struct intel_crtc_state *old_crtc_state,
5678 struct drm_atomic_state *old_state)
5679 {
5680 struct drm_connector_state *old_conn_state;
5681 struct drm_connector *conn;
5682 int i;
5683
5684 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5685 struct intel_encoder *encoder =
5686 to_intel_encoder(old_conn_state->best_encoder);
5687
5688 if (old_conn_state->crtc != crtc)
5689 continue;
5690
5691 intel_opregion_notify_encoder(encoder, false);
5692 if (encoder->disable)
5693 encoder->disable(encoder, old_crtc_state, old_conn_state);
5694 }
5695 }
5696
5697 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5698 struct intel_crtc_state *old_crtc_state,
5699 struct drm_atomic_state *old_state)
5700 {
5701 struct drm_connector_state *old_conn_state;
5702 struct drm_connector *conn;
5703 int i;
5704
5705 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5706 struct intel_encoder *encoder =
5707 to_intel_encoder(old_conn_state->best_encoder);
5708
5709 if (old_conn_state->crtc != crtc)
5710 continue;
5711
5712 if (encoder->post_disable)
5713 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5714 }
5715 }
5716
5717 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5718 struct intel_crtc_state *old_crtc_state,
5719 struct drm_atomic_state *old_state)
5720 {
5721 struct drm_connector_state *old_conn_state;
5722 struct drm_connector *conn;
5723 int i;
5724
5725 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5726 struct intel_encoder *encoder =
5727 to_intel_encoder(old_conn_state->best_encoder);
5728
5729 if (old_conn_state->crtc != crtc)
5730 continue;
5731
5732 if (encoder->post_pll_disable)
5733 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5734 }
5735 }
5736
5737 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5738 struct intel_crtc_state *crtc_state,
5739 struct drm_atomic_state *old_state)
5740 {
5741 struct drm_connector_state *conn_state;
5742 struct drm_connector *conn;
5743 int i;
5744
5745 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5746 struct intel_encoder *encoder =
5747 to_intel_encoder(conn_state->best_encoder);
5748
5749 if (conn_state->crtc != crtc)
5750 continue;
5751
5752 if (encoder->update_pipe)
5753 encoder->update_pipe(encoder, crtc_state, conn_state);
5754 }
5755 }
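
/*
 * Editor's summary (illustrative, not driver code): the helpers above
 * dispatch the optional encoder hooks in a fixed order around the crtc
 * enable/disable sequences:
 *
 *	enable:  ->pre_pll_enable, ->pre_enable, ->enable
 *	disable: ->disable, ->post_disable, ->post_pll_disable
 *
 * Every hook is optional, and each helper skips encoders whose
 * connector state isn't bound to the crtc being updated.
 */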
5756
5757 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5758 struct drm_atomic_state *old_state)
5759 {
5760 struct drm_crtc *crtc = pipe_config->base.crtc;
5761 struct drm_device *dev = crtc->dev;
5762 struct drm_i915_private *dev_priv = to_i915(dev);
5763 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5764 int pipe = intel_crtc->pipe;
5765 struct intel_atomic_state *old_intel_state =
5766 to_intel_atomic_state(old_state);
5767
5768 if (WARN_ON(intel_crtc->active))
5769 return;
5770
5771 /*
5772 * Sometimes spurious CPU pipe underruns happen during FDI
5773 * training, at least with VGA+HDMI cloning. Suppress them.
5774 *
5775 * On ILK we get occasional spurious CPU pipe underruns
5776 * between eDP port A enable and vdd enable. Also PCH port
5777 * enable seems to result in the occasional CPU pipe underrun.
5778 *
5779 * Spurious PCH underruns also occur during PCH enabling.
5780 */
5781 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5782 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5783
5784 if (pipe_config->has_pch_encoder)
5785 intel_prepare_shared_dpll(pipe_config);
5786
5787 if (intel_crtc_has_dp_encoder(pipe_config))
5788 intel_dp_set_m_n(pipe_config, M1_N1);
5789
5790 intel_set_pipe_timings(pipe_config);
5791 intel_set_pipe_src_size(pipe_config);
5792
5793 if (pipe_config->has_pch_encoder) {
5794 intel_cpu_transcoder_set_m_n(pipe_config,
5795 &pipe_config->fdi_m_n, NULL);
5796 }
5797
5798 ironlake_set_pipeconf(pipe_config);
5799
5800 intel_crtc->active = true;
5801
5802 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5803
5804 if (pipe_config->has_pch_encoder) {
5805 /* Note: FDI PLL enabling _must_ be done before we enable the
5806 * cpu pipes, hence this is separate from all the other fdi/pch
5807 * enabling. */
5808 ironlake_fdi_pll_enable(pipe_config);
5809 } else {
5810 assert_fdi_tx_disabled(dev_priv, pipe);
5811 assert_fdi_rx_disabled(dev_priv, pipe);
5812 }
5813
5814 ironlake_pfit_enable(pipe_config);
5815
5816 /*
5817 * On ILK+ LUT must be loaded before the pipe is running but with
5818 * clocks enabled
5819 */
5820 intel_color_load_luts(pipe_config);
5821 intel_color_commit(pipe_config);
5822
5823 if (dev_priv->display.initial_watermarks != NULL)
5824 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5825 intel_enable_pipe(pipe_config);
5826
5827 if (pipe_config->has_pch_encoder)
5828 ironlake_pch_enable(old_intel_state, pipe_config);
5829
5830 assert_vblank_disabled(crtc);
5831 intel_crtc_vblank_on(pipe_config);
5832
5833 intel_encoders_enable(crtc, pipe_config, old_state);
5834
5835 if (HAS_PCH_CPT(dev_priv))
5836 cpt_verify_modeset(dev, intel_crtc->pipe);
5837
5838 /*
5839 * Must wait for vblank to avoid spurious PCH FIFO underruns.
5840 * And a second vblank wait is needed at least on ILK with
5841 * some interlaced HDMI modes. Let's always do the double wait
5842 * in case there are more corner cases we don't know about.
5843 */
5844 if (pipe_config->has_pch_encoder) {
5845 intel_wait_for_vblank(dev_priv, pipe);
5846 intel_wait_for_vblank(dev_priv, pipe);
5847 }
5848 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5849 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5850 }
5851
5852 /* IPS only exists on ULT machines and is tied to pipe A. */
5853 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5854 {
5855 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5856 }
5857
5858 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5859 enum pipe pipe, bool apply)
5860 {
5861 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5862 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5863
5864 if (apply)
5865 val |= mask;
5866 else
5867 val &= ~mask;
5868
5869 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5870 }
5871
5872 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5873 {
5874 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5875 enum pipe pipe = crtc->pipe;
5876 u32 val;
5877
5878 val = MBUS_DBOX_A_CREDIT(2);
5879 val |= MBUS_DBOX_BW_CREDIT(1);
5880 val |= MBUS_DBOX_B_CREDIT(8);
5881
5882 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5883 }
5884
5885 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5886 struct drm_atomic_state *old_state)
5887 {
5888 struct drm_crtc *crtc = pipe_config->base.crtc;
5889 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5890 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5891 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
5892 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5893 struct intel_atomic_state *old_intel_state =
5894 to_intel_atomic_state(old_state);
5895 bool psl_clkgate_wa;
5896
5897 if (WARN_ON(intel_crtc->active))
5898 return;
5899
5900 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5901
5902 if (pipe_config->shared_dpll)
5903 intel_enable_shared_dpll(pipe_config);
5904
5905 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5906
5907 if (intel_crtc_has_dp_encoder(pipe_config))
5908 intel_dp_set_m_n(pipe_config, M1_N1);
5909
5910 if (!transcoder_is_dsi(cpu_transcoder))
5911 intel_set_pipe_timings(pipe_config);
5912
5913 intel_set_pipe_src_size(pipe_config);
5914
5915 if (cpu_transcoder != TRANSCODER_EDP &&
5916 !transcoder_is_dsi(cpu_transcoder)) {
5917 I915_WRITE(PIPE_MULT(cpu_transcoder),
5918 pipe_config->pixel_multiplier - 1);
5919 }
5920
5921 if (pipe_config->has_pch_encoder) {
5922 intel_cpu_transcoder_set_m_n(pipe_config,
5923 &pipe_config->fdi_m_n, NULL);
5924 }
5925
5926 if (!transcoder_is_dsi(cpu_transcoder))
5927 haswell_set_pipeconf(pipe_config);
5928
5929 haswell_set_pipemisc(pipe_config);
5930
5931 intel_crtc->active = true;
5932
5933 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5934 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
5935 pipe_config->pch_pfit.enabled;
5936 if (psl_clkgate_wa)
5937 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
5938
5939 if (INTEL_GEN(dev_priv) >= 9)
5940 skylake_pfit_enable(pipe_config);
5941 else
5942 ironlake_pfit_enable(pipe_config);
5943
5944 /*
5945 * On ILK+ LUT must be loaded before the pipe is running but with
5946 * clocks enabled
5947 */
5948 intel_color_load_luts(pipe_config);
5949 intel_color_commit(pipe_config);
5950
5951 if (INTEL_GEN(dev_priv) >= 11)
5952 icl_set_pipe_chicken(intel_crtc);
5953
5954 intel_ddi_set_pipe_settings(pipe_config);
5955 if (!transcoder_is_dsi(cpu_transcoder))
5956 intel_ddi_enable_transcoder_func(pipe_config);
5957
5958 if (dev_priv->display.initial_watermarks != NULL)
5959 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5960
5961 if (INTEL_GEN(dev_priv) >= 11)
5962 icl_pipe_mbus_enable(intel_crtc);
5963
5964 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5965 if (!transcoder_is_dsi(cpu_transcoder))
5966 intel_enable_pipe(pipe_config);
5967
5968 if (pipe_config->has_pch_encoder)
5969 lpt_pch_enable(old_intel_state, pipe_config);
5970
5971 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
5972 intel_ddi_set_vc_payload_alloc(pipe_config, true);
5973
5974 assert_vblank_disabled(crtc);
5975 intel_crtc_vblank_on(pipe_config);
5976
5977 intel_encoders_enable(crtc, pipe_config, old_state);
5978
5979 if (psl_clkgate_wa) {
5980 intel_wait_for_vblank(dev_priv, pipe);
5981 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
5982 }
5983
5984 /* If we change the relative order between pipe/planes enabling, we need
5985 * to change the workaround. */
5986 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5987 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
5988 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
5989 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
5990 }
5991 }
5992
5993 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
5994 {
5995 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5996 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5997 enum pipe pipe = crtc->pipe;
5998
5999 /* To avoid upsetting the power well on Haswell, only disable the pfit if
6000 * it's in use. The hw state code will make sure we get this right. */
6001 if (old_crtc_state->pch_pfit.enabled) {
6002 I915_WRITE(PF_CTL(pipe), 0);
6003 I915_WRITE(PF_WIN_POS(pipe), 0);
6004 I915_WRITE(PF_WIN_SZ(pipe), 0);
6005 }
6006 }
6007
6008 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6009 struct drm_atomic_state *old_state)
6010 {
6011 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6012 struct drm_device *dev = crtc->dev;
6013 struct drm_i915_private *dev_priv = to_i915(dev);
6014 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6015 int pipe = intel_crtc->pipe;
6016
6017 /*
6018 * Sometimes spurious CPU pipe underruns happen when the
6019 * pipe is already disabled, but FDI RX/TX is still enabled.
6020 * Happens at least with VGA+HDMI cloning. Suppress them.
6021 */
6022 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6023 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6024
6025 intel_encoders_disable(crtc, old_crtc_state, old_state);
6026
6027 drm_crtc_vblank_off(crtc);
6028 assert_vblank_disabled(crtc);
6029
6030 intel_disable_pipe(old_crtc_state);
6031
6032 ironlake_pfit_disable(old_crtc_state);
6033
6034 if (old_crtc_state->has_pch_encoder)
6035 ironlake_fdi_disable(crtc);
6036
6037 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6038
6039 if (old_crtc_state->has_pch_encoder) {
6040 ironlake_disable_pch_transcoder(dev_priv, pipe);
6041
6042 if (HAS_PCH_CPT(dev_priv)) {
6043 i915_reg_t reg;
6044 u32 temp;
6045
6046 /* disable TRANS_DP_CTL */
6047 reg = TRANS_DP_CTL(pipe);
6048 temp = I915_READ(reg);
6049 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6050 TRANS_DP_PORT_SEL_MASK);
6051 temp |= TRANS_DP_PORT_SEL_NONE;
6052 I915_WRITE(reg, temp);
6053
6054 /* disable DPLL_SEL */
6055 temp = I915_READ(PCH_DPLL_SEL);
6056 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6057 I915_WRITE(PCH_DPLL_SEL, temp);
6058 }
6059
6060 ironlake_fdi_pll_disable(intel_crtc);
6061 }
6062
6063 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6064 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6065 }
6066
6067 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6068 struct drm_atomic_state *old_state)
6069 {
6070 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6071 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6073 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6074
6075 intel_encoders_disable(crtc, old_crtc_state, old_state);
6076
6077 drm_crtc_vblank_off(crtc);
6078 assert_vblank_disabled(crtc);
6079
6080 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6081 if (!transcoder_is_dsi(cpu_transcoder))
6082 intel_disable_pipe(old_crtc_state);
6083
6084 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6085 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6086
6087 if (!transcoder_is_dsi(cpu_transcoder))
6088 intel_ddi_disable_transcoder_func(old_crtc_state);
6089
6090 intel_dsc_disable(old_crtc_state);
6091
6092 if (INTEL_GEN(dev_priv) >= 9)
6093 skylake_scaler_disable(intel_crtc);
6094 else
6095 ironlake_pfit_disable(old_crtc_state);
6096
6097 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6098
6099 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6100 }
6101
6102 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6103 {
6104 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6105 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6106
6107 if (!crtc_state->gmch_pfit.control)
6108 return;
6109
6110 /*
6111 * The panel fitter should only be adjusted whilst the pipe is disabled,
6112 * according to register description and PRM.
6113 */
6114 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6115 assert_pipe_disabled(dev_priv, crtc->pipe);
6116
6117 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6118 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6119
6120 /* Border color in case we don't scale up to the full screen. Black by
6121 * default, change to something else for debugging. */
6122 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6123 }
6124
6125 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6126 {
6127 if (port == PORT_NONE)
6128 return false;
6129
6130 if (IS_ICELAKE(dev_priv))
6131 return port <= PORT_B;
6132
6133 return false;
6134 }
6135
6136 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6137 {
6138 if (IS_ICELAKE(dev_priv))
6139 return port >= PORT_C && port <= PORT_F;
6140
6141 return false;
6142 }
6143
6144 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6145 {
6146 if (!intel_port_is_tc(dev_priv, port))
6147 return PORT_TC_NONE;
6148
6149 return port - PORT_C;
6150 }
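
/*
 * Illustrative example (editor's note; assumes PORT_TC1 is the first
 * tc_port value): on Icelake the Type-C ports start at PORT_C, so
 *
 *	intel_port_to_tc(dev_priv, PORT_C) == PORT_TC1
 *	intel_port_to_tc(dev_priv, PORT_F) == PORT_TC4
 *
 * while the combo PHY ports A/B map to PORT_TC_NONE.
 */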
6151
6152 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6153 {
6154 switch (port) {
6155 case PORT_A:
6156 return POWER_DOMAIN_PORT_DDI_A_LANES;
6157 case PORT_B:
6158 return POWER_DOMAIN_PORT_DDI_B_LANES;
6159 case PORT_C:
6160 return POWER_DOMAIN_PORT_DDI_C_LANES;
6161 case PORT_D:
6162 return POWER_DOMAIN_PORT_DDI_D_LANES;
6163 case PORT_E:
6164 return POWER_DOMAIN_PORT_DDI_E_LANES;
6165 case PORT_F:
6166 return POWER_DOMAIN_PORT_DDI_F_LANES;
6167 default:
6168 MISSING_CASE(port);
6169 return POWER_DOMAIN_PORT_OTHER;
6170 }
6171 }
6172
6173 enum intel_display_power_domain
6174 intel_aux_power_domain(struct intel_digital_port *dig_port)
6175 {
6176 switch (dig_port->aux_ch) {
6177 case AUX_CH_A:
6178 return POWER_DOMAIN_AUX_A;
6179 case AUX_CH_B:
6180 return POWER_DOMAIN_AUX_B;
6181 case AUX_CH_C:
6182 return POWER_DOMAIN_AUX_C;
6183 case AUX_CH_D:
6184 return POWER_DOMAIN_AUX_D;
6185 case AUX_CH_E:
6186 return POWER_DOMAIN_AUX_E;
6187 case AUX_CH_F:
6188 return POWER_DOMAIN_AUX_F;
6189 default:
6190 MISSING_CASE(dig_port->aux_ch);
6191 return POWER_DOMAIN_AUX_A;
6192 }
6193 }
6194
6195 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6196 struct intel_crtc_state *crtc_state)
6197 {
6198 struct drm_device *dev = crtc->dev;
6199 struct drm_i915_private *dev_priv = to_i915(dev);
6200 struct drm_encoder *encoder;
6201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6202 enum pipe pipe = intel_crtc->pipe;
6203 u64 mask;
6204 enum transcoder transcoder = crtc_state->cpu_transcoder;
6205
6206 if (!crtc_state->base.active)
6207 return 0;
6208
6209 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6210 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6211 if (crtc_state->pch_pfit.enabled ||
6212 crtc_state->pch_pfit.force_thru)
6213 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6214
6215 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6216 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6217
6218 mask |= BIT_ULL(intel_encoder->power_domain);
6219 }
6220
6221 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6222 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6223
6224 if (crtc_state->shared_dpll)
6225 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6226
6227 return mask;
6228 }
6229
6230 static u64
6231 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6232 struct intel_crtc_state *crtc_state)
6233 {
6234 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6235 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6236 enum intel_display_power_domain domain;
6237 u64 domains, new_domains, old_domains;
6238
6239 old_domains = intel_crtc->enabled_power_domains;
6240 intel_crtc->enabled_power_domains = new_domains =
6241 get_crtc_power_domains(crtc, crtc_state);
6242
6243 domains = new_domains & ~old_domains;
6244
6245 for_each_power_domain(domain, domains)
6246 intel_display_power_get(dev_priv, domain);
6247
6248 return old_domains & ~new_domains;
6249 }
6250
6251 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6252 u64 domains)
6253 {
6254 enum intel_display_power_domain domain;
6255
6256 for_each_power_domain(domain, domains)
6257 intel_display_power_put_unchecked(dev_priv, domain);
6258 }
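
/*
 * Editor's sketch (illustrative only): the get/put pair above operates
 * on the set difference of the old and new domain masks, so across a
 * modeset only the domains that actually change hands are touched:
 *
 *	domains_to_put = modeset_get_crtc_power_domains(crtc, new_state);
 *	... reprogram the crtc ...
 *	modeset_put_power_domains(dev_priv, domains_to_put);
 *
 * New domains (new & ~old) are referenced before reprogramming; stale
 * ones (old & ~new) are only released afterwards, so a power well is
 * never dropped while the hardware might still need it.
 */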
6259
6260 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6261 struct drm_atomic_state *old_state)
6262 {
6263 struct intel_atomic_state *old_intel_state =
6264 to_intel_atomic_state(old_state);
6265 struct drm_crtc *crtc = pipe_config->base.crtc;
6266 struct drm_device *dev = crtc->dev;
6267 struct drm_i915_private *dev_priv = to_i915(dev);
6268 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6269 int pipe = intel_crtc->pipe;
6270
6271 if (WARN_ON(intel_crtc->active))
6272 return;
6273
6274 if (intel_crtc_has_dp_encoder(pipe_config))
6275 intel_dp_set_m_n(pipe_config, M1_N1);
6276
6277 intel_set_pipe_timings(pipe_config);
6278 intel_set_pipe_src_size(pipe_config);
6279
6280 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6281 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6282 I915_WRITE(CHV_CANVAS(pipe), 0);
6283 }
6284
6285 i9xx_set_pipeconf(pipe_config);
6286
6287 intel_crtc->active = true;
6288
6289 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6290
6291 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6292
6293 if (IS_CHERRYVIEW(dev_priv)) {
6294 chv_prepare_pll(intel_crtc, pipe_config);
6295 chv_enable_pll(intel_crtc, pipe_config);
6296 } else {
6297 vlv_prepare_pll(intel_crtc, pipe_config);
6298 vlv_enable_pll(intel_crtc, pipe_config);
6299 }
6300
6301 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6302
6303 i9xx_pfit_enable(pipe_config);
6304
6305 intel_color_load_luts(pipe_config);
6306 intel_color_commit(pipe_config);
6307
6308 dev_priv->display.initial_watermarks(old_intel_state,
6309 pipe_config);
6310 intel_enable_pipe(pipe_config);
6311
6312 assert_vblank_disabled(crtc);
6313 intel_crtc_vblank_on(pipe_config);
6314
6315 intel_encoders_enable(crtc, pipe_config, old_state);
6316 }
6317
6318 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6319 {
6320 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6321 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6322
6323 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6324 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6325 }
6326
6327 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6328 struct drm_atomic_state *old_state)
6329 {
6330 struct intel_atomic_state *old_intel_state =
6331 to_intel_atomic_state(old_state);
6332 struct drm_crtc *crtc = pipe_config->base.crtc;
6333 struct drm_device *dev = crtc->dev;
6334 struct drm_i915_private *dev_priv = to_i915(dev);
6335 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6336 enum pipe pipe = intel_crtc->pipe;
6337
6338 if (WARN_ON(intel_crtc->active))
6339 return;
6340
6341 i9xx_set_pll_dividers(pipe_config);
6342
6343 if (intel_crtc_has_dp_encoder(pipe_config))
6344 intel_dp_set_m_n(pipe_config, M1_N1);
6345
6346 intel_set_pipe_timings(pipe_config);
6347 intel_set_pipe_src_size(pipe_config);
6348
6349 i9xx_set_pipeconf(pipe_config);
6350
6351 intel_crtc->active = true;
6352
6353 if (!IS_GEN(dev_priv, 2))
6354 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6355
6356 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6357
6358 i9xx_enable_pll(intel_crtc, pipe_config);
6359
6360 i9xx_pfit_enable(pipe_config);
6361
6362 intel_color_load_luts(pipe_config);
6363 intel_color_commit(pipe_config);
6364
6365 if (dev_priv->display.initial_watermarks != NULL)
6366 dev_priv->display.initial_watermarks(old_intel_state,
6367 pipe_config);
6368 else
6369 intel_update_watermarks(intel_crtc);
6370 intel_enable_pipe(pipe_config);
6371
6372 assert_vblank_disabled(crtc);
6373 intel_crtc_vblank_on(pipe_config);
6374
6375 intel_encoders_enable(crtc, pipe_config, old_state);
6376 }
6377
6378 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6379 {
6380 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6381 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6382
6383 if (!old_crtc_state->gmch_pfit.control)
6384 return;
6385
6386 assert_pipe_disabled(dev_priv, crtc->pipe);
6387
6388 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6389 I915_READ(PFIT_CONTROL));
6390 I915_WRITE(PFIT_CONTROL, 0);
6391 }
6392
6393 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6394 struct drm_atomic_state *old_state)
6395 {
6396 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6397 struct drm_device *dev = crtc->dev;
6398 struct drm_i915_private *dev_priv = to_i915(dev);
6399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6400 int pipe = intel_crtc->pipe;
6401
6402 /*
6403 * On gen2 planes are double buffered but the pipe isn't, so we must
6404 * wait for planes to fully turn off before disabling the pipe.
6405 */
6406 if (IS_GEN(dev_priv, 2))
6407 intel_wait_for_vblank(dev_priv, pipe);
6408
6409 intel_encoders_disable(crtc, old_crtc_state, old_state);
6410
6411 drm_crtc_vblank_off(crtc);
6412 assert_vblank_disabled(crtc);
6413
6414 intel_disable_pipe(old_crtc_state);
6415
6416 i9xx_pfit_disable(old_crtc_state);
6417
6418 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6419
6420 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6421 if (IS_CHERRYVIEW(dev_priv))
6422 chv_disable_pll(dev_priv, pipe);
6423 else if (IS_VALLEYVIEW(dev_priv))
6424 vlv_disable_pll(dev_priv, pipe);
6425 else
6426 i9xx_disable_pll(old_crtc_state);
6427 }
6428
6429 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6430
6431 if (!IS_GEN(dev_priv, 2))
6432 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6433
6434 if (!dev_priv->display.initial_watermarks)
6435 intel_update_watermarks(intel_crtc);
6436
6437 /* clock the pipe down to 640x480@60 to potentially save power */
6438 if (IS_I830(dev_priv))
6439 i830_enable_pipe(dev_priv, pipe);
6440 }
6441
6442 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6443 struct drm_modeset_acquire_ctx *ctx)
6444 {
6445 struct intel_encoder *encoder;
6446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6447 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6448 enum intel_display_power_domain domain;
6449 struct intel_plane *plane;
6450 u64 domains;
6451 struct drm_atomic_state *state;
6452 struct intel_crtc_state *crtc_state;
6453 int ret;
6454
6455 if (!intel_crtc->active)
6456 return;
6457
6458 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6459 const struct intel_plane_state *plane_state =
6460 to_intel_plane_state(plane->base.state);
6461
6462 if (plane_state->base.visible)
6463 intel_plane_disable_noatomic(intel_crtc, plane);
6464 }
6465
6466 state = drm_atomic_state_alloc(crtc->dev);
6467 if (!state) {
6468 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6469 crtc->base.id, crtc->name);
6470 return;
6471 }
6472
6473 state->acquire_ctx = ctx;
6474
6475 /* Everything's already locked, -EDEADLK can't happen. */
6476 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6477 ret = drm_atomic_add_affected_connectors(state, crtc);
6478
6479 WARN_ON(IS_ERR(crtc_state) || ret);
6480
6481 dev_priv->display.crtc_disable(crtc_state, state);
6482
6483 drm_atomic_state_put(state);
6484
6485 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6486 crtc->base.id, crtc->name);
6487
6488 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6489 crtc->state->active = false;
6490 intel_crtc->active = false;
6491 crtc->enabled = false;
6492 crtc->state->connector_mask = 0;
6493 crtc->state->encoder_mask = 0;
6494
6495 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6496 encoder->base.crtc = NULL;
6497
6498 intel_fbc_disable(intel_crtc);
6499 intel_update_watermarks(intel_crtc);
6500 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6501
6502 domains = intel_crtc->enabled_power_domains;
6503 for_each_power_domain(domain, domains)
6504 intel_display_power_put_unchecked(dev_priv, domain);
6505 intel_crtc->enabled_power_domains = 0;
6506
6507 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6508 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6509 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6510 }
6511
6512 /*
6513 * Turn all CRTCs off, but do not adjust state.
6514 * This has to be paired with a call to intel_modeset_setup_hw_state.
6515 */
6516 int intel_display_suspend(struct drm_device *dev)
6517 {
6518 struct drm_i915_private *dev_priv = to_i915(dev);
6519 struct drm_atomic_state *state;
6520 int ret;
6521
6522 state = drm_atomic_helper_suspend(dev);
6523 ret = PTR_ERR_OR_ZERO(state);
6524 if (ret)
6525 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6526 else
6527 dev_priv->modeset_restore_state = state;
6528 return ret;
6529 }
6530
6531 void intel_encoder_destroy(struct drm_encoder *encoder)
6532 {
6533 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6534
6535 drm_encoder_cleanup(encoder);
6536 kfree(intel_encoder);
6537 }
6538
6539 /* Cross-check the actual hw state with our own modeset state tracking (and its
6540 * internal consistency). */
6541 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6542 struct drm_connector_state *conn_state)
6543 {
6544 struct intel_connector *connector = to_intel_connector(conn_state->connector);
6545
6546 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6547 connector->base.base.id,
6548 connector->base.name);
6549
6550 if (connector->get_hw_state(connector)) {
6551 struct intel_encoder *encoder = connector->encoder;
6552
6553 I915_STATE_WARN(!crtc_state,
6554 "connector enabled without attached crtc\n");
6555
6556 if (!crtc_state)
6557 return;
6558
6559 I915_STATE_WARN(!crtc_state->active,
6560 "connector is active, but attached crtc isn't\n");
6561
6562 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6563 return;
6564
6565 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6566 "atomic encoder doesn't match attached encoder\n");
6567
6568 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6569 "attached encoder crtc differs from connector crtc\n");
6570 } else {
6571 I915_STATE_WARN(crtc_state && crtc_state->active,
6572 "attached crtc is active, but connector isn't\n");
6573 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
6574 "best encoder set without crtc!\n");
6575 }
6576 }
6577
6578 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6579 {
6580 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6581 return crtc_state->fdi_lanes;
6582
6583 return 0;
6584 }
6585
6586 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6587 struct intel_crtc_state *pipe_config)
6588 {
6589 struct drm_i915_private *dev_priv = to_i915(dev);
6590 struct drm_atomic_state *state = pipe_config->base.state;
6591 struct intel_crtc *other_crtc;
6592 struct intel_crtc_state *other_crtc_state;
6593
6594 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6595 pipe_name(pipe), pipe_config->fdi_lanes);
6596 if (pipe_config->fdi_lanes > 4) {
6597 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6598 pipe_name(pipe), pipe_config->fdi_lanes);
6599 return -EINVAL;
6600 }
6601
6602 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
6603 if (pipe_config->fdi_lanes > 2) {
6604 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6605 pipe_config->fdi_lanes);
6606 return -EINVAL;
6607 } else {
6608 return 0;
6609 }
6610 }
6611
6612 if (INTEL_INFO(dev_priv)->num_pipes == 2)
6613 return 0;
6614
6615 /* Ivybridge 3 pipe is really complicated */
6616 switch (pipe) {
6617 case PIPE_A:
6618 return 0;
6619 case PIPE_B:
6620 if (pipe_config->fdi_lanes <= 2)
6621 return 0;
6622
6623 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
6624 other_crtc_state =
6625 intel_atomic_get_crtc_state(state, other_crtc);
6626 if (IS_ERR(other_crtc_state))
6627 return PTR_ERR(other_crtc_state);
6628
6629 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6630 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6631 pipe_name(pipe), pipe_config->fdi_lanes);
6632 return -EINVAL;
6633 }
6634 return 0;
6635 case PIPE_C:
6636 if (pipe_config->fdi_lanes > 2) {
6637 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6638 pipe_name(pipe), pipe_config->fdi_lanes);
6639 return -EINVAL;
6640 }
6641
6642 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
6643 other_crtc_state =
6644 intel_atomic_get_crtc_state(state, other_crtc);
6645 if (IS_ERR(other_crtc_state))
6646 return PTR_ERR(other_crtc_state);
6647
6648 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6649 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6650 return -EINVAL;
6651 }
6652 return 0;
6653 default:
6654 BUG();
6655 }
6656 }
6657
6658 #define RETRY 1
6659 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6660 struct intel_crtc_state *pipe_config)
6661 {
6662 struct drm_device *dev = intel_crtc->base.dev;
6663 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6664 int lane, link_bw, fdi_dotclock, ret;
6665 bool needs_recompute = false;
6666
6667 retry:
6668 /* FDI is a binary signal running at ~2.7GHz, encoding
6669 * each output octet as 10 bits. The actual frequency
6670 * is stored as a divider into a 100MHz clock, and the
6671 * mode pixel clock is stored in units of 1KHz.
6672 * Hence the bw of each lane in terms of the mode signal
6673 * is:
6674 */
6675 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6676
6677 fdi_dotclock = adjusted_mode->crtc_clock;
6678
6679 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6680 pipe_config->pipe_bpp);
6681
6682 pipe_config->fdi_lanes = lane;
6683
6684 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6685 link_bw, &pipe_config->fdi_m_n, false);
6686
6687 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6688 if (ret == -EDEADLK)
6689 return ret;
6690
6691 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6692 pipe_config->pipe_bpp -= 2*3;
6693 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6694 pipe_config->pipe_bpp);
6695 needs_recompute = true;
6696 pipe_config->bw_constrained = true;
6697
6698 goto retry;
6699 }
6700
6701 if (needs_recompute)
6702 return RETRY;
6703
6704 return ret;
6705 }
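
/*
 * Editor's worked example (illustrative; assumes the small bandwidth
 * margin added by ironlake_get_lanes_required()): a 148500 kHz mode at
 * 24bpp over a 270000 kHz FDI link clock needs
 *
 *	payload  ~= 148500 * 24            = 3564000 kbps (plus margin)
 *	per lane  = 270000 * 8 bits/clock  = 2160000 kbps
 *	lanes     = DIV_ROUND_UP(payload, per lane) = 2
 *
 * If the resulting lane count doesn't validate, the retry: loop above
 * shaves 2 bits per component off pipe_bpp and recomputes.
 */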
6706
6707 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6708 {
6709 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6710 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6711
6712 /* IPS only exists on ULT machines and is tied to pipe A. */
6713 if (!hsw_crtc_supports_ips(crtc))
6714 return false;
6715
6716 if (!i915_modparams.enable_ips)
6717 return false;
6718
6719 if (crtc_state->pipe_bpp > 24)
6720 return false;
6721
6722 /*
6723 * We compare against max which means we must take
6724 * the increased cdclk requirement into account when
6725 * calculating the new cdclk.
6726 *
6727 * We should measure whether a lower cdclk without IPS would save more power.
6728 */
6729 if (IS_BROADWELL(dev_priv) &&
6730 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6731 return false;
6732
6733 return true;
6734 }
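
/*
 * Illustrative numbers (editor's example): with a 675000 kHz max cdclk
 * on BDW, the 95% rule above allows IPS for any pipe running at
 * pixel_rate <= 675000 * 95 / 100 = 641250 kHz; anything faster must
 * run with IPS off.
 */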
6735
6736 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6737 {
6738 struct drm_i915_private *dev_priv =
6739 to_i915(crtc_state->base.crtc->dev);
6740 struct intel_atomic_state *intel_state =
6741 to_intel_atomic_state(crtc_state->base.state);
6742
6743 if (!hsw_crtc_state_ips_capable(crtc_state))
6744 return false;
6745
6746 if (crtc_state->ips_force_disable)
6747 return false;
6748
6749 /* IPS should be fine as long as at least one plane is enabled. */
6750 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6751 return false;
6752
6753 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6754 if (IS_BROADWELL(dev_priv) &&
6755 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6756 return false;
6757
6758 return true;
6759 }
6760
6761 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6762 {
6763 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6764
6765 /* GDG double wide on either pipe, otherwise pipe A only */
6766 return INTEL_GEN(dev_priv) < 4 &&
6767 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6768 }
6769
6770 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6771 {
6772 u32 pixel_rate;
6773
6774 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6775
6776 /*
6777 * We only use IF-ID interlacing. If we ever use
6778 * PF-ID we'll need to adjust the pixel_rate here.
6779 */
6780
6781 if (pipe_config->pch_pfit.enabled) {
6782 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6783 u32 pfit_size = pipe_config->pch_pfit.size;
6784
6785 pipe_w = pipe_config->pipe_src_w;
6786 pipe_h = pipe_config->pipe_src_h;
6787
6788 pfit_w = (pfit_size >> 16) & 0xFFFF;
6789 pfit_h = pfit_size & 0xFFFF;
6790 if (pipe_w < pfit_w)
6791 pipe_w = pfit_w;
6792 if (pipe_h < pfit_h)
6793 pipe_h = pfit_h;
6794
6795 if (WARN_ON(!pfit_w || !pfit_h))
6796 return pixel_rate;
6797
6798 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
6799 pfit_w * pfit_h);
6800 }
6801
6802 return pixel_rate;
6803 }
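
/*
 * Editor's worked example (illustrative only): pch pfit downscaling
 * raises the effective pixel rate. Scaling a 1920x1080 source into a
 * 1280x720 pfit window at a 148500 kHz dotclock gives
 *
 *	pixel_rate = 148500 * (1920 * 1080) / (1280 * 720)
 *	           = 148500 * 9 / 4 = 334125 kHz
 *
 * Upscaling instead clamps pipe_w/pipe_h up to the pfit size first,
 * so the ratio is 1 and the rate is unchanged.
 */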
6804
6805 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6806 {
6807 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6808
6809 if (HAS_GMCH(dev_priv))
6810 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6811 crtc_state->pixel_rate =
6812 crtc_state->base.adjusted_mode.crtc_clock;
6813 else
6814 crtc_state->pixel_rate =
6815 ilk_pipe_pixel_rate(crtc_state);
6816 }
6817
6818 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6819 struct intel_crtc_state *pipe_config)
6820 {
6821 struct drm_device *dev = crtc->base.dev;
6822 struct drm_i915_private *dev_priv = to_i915(dev);
6823 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6824 int clock_limit = dev_priv->max_dotclk_freq;
6825
6826 if (INTEL_GEN(dev_priv) < 4) {
6827 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6828
6829 /*
6830 * Enable double wide mode when the dot clock
6831 * is > 90% of the (display) core speed.
6832 */
6833 if (intel_crtc_supports_double_wide(crtc) &&
6834 adjusted_mode->crtc_clock > clock_limit) {
6835 clock_limit = dev_priv->max_dotclk_freq;
6836 pipe_config->double_wide = true;
6837 }
6838 }
6839
6840 if (adjusted_mode->crtc_clock > clock_limit) {
6841 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6842 adjusted_mode->crtc_clock, clock_limit,
6843 yesno(pipe_config->double_wide));
6844 return -EINVAL;
6845 }
6846
6847 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6848 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
6849 pipe_config->base.ctm) {
6850 /*
6851 * There is only one pipe CSC unit per pipe, and we need that
6852 * for output conversion from RGB->YCBCR. So if CTM is already
6853 * applied we can't support YCBCR420 output.
6854 */
6855 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
6856 return -EINVAL;
6857 }
6858
6859 /*
6860 * Pipe horizontal size must be even in:
6861 * - DVO ganged mode
6862 * - LVDS dual channel mode
6863 * - Double wide pipe
6864 */
6865 if (pipe_config->pipe_src_w & 1) {
6866 if (pipe_config->double_wide) {
6867 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
6868 return -EINVAL;
6869 }
6870
6871 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6872 intel_is_dual_link_lvds(dev)) {
6873 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
6874 return -EINVAL;
6875 }
6876 }
6877
6878 /* Cantiga+ cannot handle modes with an hsync front porch of 0.
6879 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6880 */
6881 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
6882 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6883 return -EINVAL;
6884
6885 intel_crtc_compute_pixel_rate(pipe_config);
6886
6887 if (pipe_config->has_pch_encoder)
6888 return ironlake_fdi_compute_config(crtc, pipe_config);
6889
6890 return 0;
6891 }
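
/*
 * Illustrative numbers (editor's example, hypothetical cdclk): on a
 * gen3 part with max_cdclk_freq = 320000 kHz the single-wide limit
 * above is 320000 * 9 / 10 = 288000 kHz. A mode with a 300000 kHz dot
 * clock is therefore only accepted with pipe_config->double_wide set,
 * which lifts the limit back to max_dotclk_freq.
 */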
6892
6893 static void
6894 intel_reduce_m_n_ratio(u32 *num, u32 *den)
6895 {
6896 while (*num > DATA_LINK_M_N_MASK ||
6897 *den > DATA_LINK_M_N_MASK) {
6898 *num >>= 1;
6899 *den >>= 1;
6900 }
6901 }
6902
6903 static void compute_m_n(unsigned int m, unsigned int n,
6904 u32 *ret_m, u32 *ret_n,
6905 bool constant_n)
6906 {
6907 /*
6908 * Several DP dongles in particular seem to be fussy about
6909 * too large link M/N values. Use an N value of 0x8000, which
6910 * such devices should accept. 0x8000 is the
6911 * specified fixed N value for asynchronous clock mode,
6912 * which the devices expect also in synchronous clock mode.
6913 */
6914 if (constant_n)
6915 *ret_n = 0x8000;
6916 else
6917 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6918
6919 *ret_m = div_u64((u64)m * *ret_n, n);
6920 intel_reduce_m_n_ratio(ret_m, ret_n);
6921 }
6922
6923 void
6924 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
6925 int pixel_clock, int link_clock,
6926 struct intel_link_m_n *m_n,
6927 bool constant_n)
6928 {
6929 m_n->tu = 64;
6930
6931 compute_m_n(bits_per_pixel * pixel_clock,
6932 link_clock * nlanes * 8,
6933 &m_n->gmch_m, &m_n->gmch_n,
6934 constant_n);
6935
6936 compute_m_n(pixel_clock, link_clock,
6937 &m_n->link_m, &m_n->link_n,
6938 constant_n);
6939 }
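
/*
 * Editor's worked example (illustrative only): a 148500 kHz, 24bpp
 * stream over 4 lanes at a 270000 kHz link clock, with constant_n set:
 *
 *	data M/N: m = 24 * 148500     = 3564000
 *	          n = 270000 * 4 * 8  = 8640000
 *	          -> gmch_n = 0x8000
 *	             gmch_m = 3564000 * 0x8000 / 8640000 = 13516
 *	link M/N: 148500 vs 270000
 *	          -> link_n = 0x8000, link_m = 18022
 *
 * intel_reduce_m_n_ratio() would then shift both values right, but
 * only if they exceeded DATA_LINK_M_N_MASK (they don't here).
 */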
6940
6941 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6942 {
6943 if (i915_modparams.panel_use_ssc >= 0)
6944 return i915_modparams.panel_use_ssc != 0;
6945 return dev_priv->vbt.lvds_use_ssc
6946 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6947 }
6948
6949 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
6950 {
6951 return (1 << dpll->n) << 16 | dpll->m2;
6952 }
6953
6954 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
6955 {
6956 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6957 }
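
/*
 * Illustrative example (editor's note): the FP register packs the
 * feedback dividers as bitfields. For n = 2, m1 = 3, m2 = 0x61:
 *
 *	i9xx_dpll_compute_fp() -> 2 << 16 | 3 << 8 | 0x61 = 0x00020361
 *
 * The Pineview variant has no M1 divider and stores N as a
 * power-of-two field: (1 << n) << 16 | m2.
 */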
6958
6959 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6960 struct intel_crtc_state *crtc_state,
6961 struct dpll *reduced_clock)
6962 {
6963 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6964 u32 fp, fp2 = 0;
6965
6966 if (IS_PINEVIEW(dev_priv)) {
6967 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6968 if (reduced_clock)
6969 fp2 = pnv_dpll_compute_fp(reduced_clock);
6970 } else {
6971 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6972 if (reduced_clock)
6973 fp2 = i9xx_dpll_compute_fp(reduced_clock);
6974 }
6975
6976 crtc_state->dpll_hw_state.fp0 = fp;
6977
6978 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6979 reduced_clock) {
6980 crtc_state->dpll_hw_state.fp1 = fp2;
6981 } else {
6982 crtc_state->dpll_hw_state.fp1 = fp;
6983 }
6984 }
6985
6986 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
6987 pipe)
6988 {
6989 u32 reg_val;
6990
6991 /*
6992 * PLLB opamp always calibrates to the max value of 0x3f; force-enable it
6993 * and set it to a reasonable value instead.
6994 */
6995 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
6996 reg_val &= 0xffffff00;
6997 reg_val |= 0x00000030;
6998 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
6999
7000 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7001 reg_val &= 0x00ffffff;
7002 reg_val |= 0x8c000000;
7003 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7004
7005 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7006 reg_val &= 0xffffff00;
7007 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7008
7009 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7010 reg_val &= 0x00ffffff;
7011 reg_val |= 0xb0000000;
7012 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7013 }
7014
7015 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7016 const struct intel_link_m_n *m_n)
7017 {
7018 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7019 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7020 enum pipe pipe = crtc->pipe;
7021
7022 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7023 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7024 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7025 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7026 }
7027
7028 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7029 enum transcoder transcoder)
7030 {
7031 if (IS_HASWELL(dev_priv))
7032 return transcoder == TRANSCODER_EDP;
7033
7034 /*
7035 * Strictly speaking, some registers are available before
7036 * gen7, but we only support DRRS on gen7+
7037 */
7038 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7039 }
7040
7041 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7042 const struct intel_link_m_n *m_n,
7043 const struct intel_link_m_n *m2_n2)
7044 {
7045 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7046 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7047 enum pipe pipe = crtc->pipe;
7048 enum transcoder transcoder = crtc_state->cpu_transcoder;
7049
7050 if (INTEL_GEN(dev_priv) >= 5) {
7051 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7052 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7053 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7054 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7055 /*
7056 * M2_N2 registers are set only if DRRS is supported
7057 * (to make sure the registers are not unnecessarily accessed).
7058 */
7059 if (m2_n2 && crtc_state->has_drrs &&
7060 transcoder_has_m2_n2(dev_priv, transcoder)) {
7061 I915_WRITE(PIPE_DATA_M2(transcoder),
7062 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7063 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7064 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7065 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7066 }
7067 } else {
7068 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7069 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7070 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7071 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7072 }
7073 }
7074
7075 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7076 {
7077 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7078
7079 if (m_n == M1_N1) {
7080 dp_m_n = &crtc_state->dp_m_n;
7081 dp_m2_n2 = &crtc_state->dp_m2_n2;
7082 } else if (m_n == M2_N2) {
7083
7084 /*
7085 * M2_N2 registers are not supported here, so the m2_n2 divider
7086 * value needs to be programmed into M1_N1.
7087 */
7088 dp_m_n = &crtc_state->dp_m2_n2;
7089 } else {
7090 DRM_ERROR("Unsupported divider value\n");
7091 return;
7092 }
7093
7094 if (crtc_state->has_pch_encoder)
7095 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7096 else
7097 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7098 }
7099
7100 static void vlv_compute_dpll(struct intel_crtc *crtc,
7101 struct intel_crtc_state *pipe_config)
7102 {
7103 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7104 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7105 if (crtc->pipe != PIPE_A)
7106 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7107
7108 /* DPLL not used with DSI, but still need the rest set up */
7109 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7110 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7111 DPLL_EXT_BUFFER_ENABLE_VLV;
7112
7113 pipe_config->dpll_hw_state.dpll_md =
7114 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7115 }
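/*
 * Note the UDI multiplier field stores (pixel_multiplier - 1): e.g. a
 * 2x multiplier is written as 1 << DPLL_MD_UDI_MULTIPLIER_SHIFT, and
 * the readback side (i9xx_get_pipe_config()) adds the 1 back.
 */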
7116
7117 static void chv_compute_dpll(struct intel_crtc *crtc,
7118 struct intel_crtc_state *pipe_config)
7119 {
7120 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7121 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7122 if (crtc->pipe != PIPE_A)
7123 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7124
7125 /* DPLL not used with DSI, but still need the rest set up */
7126 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7127 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7128
7129 pipe_config->dpll_hw_state.dpll_md =
7130 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7131 }
7132
7133 static void vlv_prepare_pll(struct intel_crtc *crtc,
7134 const struct intel_crtc_state *pipe_config)
7135 {
7136 struct drm_device *dev = crtc->base.dev;
7137 struct drm_i915_private *dev_priv = to_i915(dev);
7138 enum pipe pipe = crtc->pipe;
7139 u32 mdiv;
7140 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7141 u32 coreclk, reg_val;
7142
7143 /* Enable Refclk */
7144 I915_WRITE(DPLL(pipe),
7145 pipe_config->dpll_hw_state.dpll &
7146 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7147
7148 /* No need to actually set up the DPLL with DSI */
7149 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7150 return;
7151
7152 mutex_lock(&dev_priv->sb_lock);
7153
7154 bestn = pipe_config->dpll.n;
7155 bestm1 = pipe_config->dpll.m1;
7156 bestm2 = pipe_config->dpll.m2;
7157 bestp1 = pipe_config->dpll.p1;
7158 bestp2 = pipe_config->dpll.p2;
7159
7160 /* See eDP HDMI DPIO driver vbios notes doc */
7161
7162 /* PLL B needs special handling */
7163 if (pipe == PIPE_B)
7164 vlv_pllb_recal_opamp(dev_priv, pipe);
7165
7166 /* Set up Tx target for periodic Rcomp update */
7167 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7168
7169 /* Disable target IRef on PLL */
7170 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7171 reg_val &= 0x00ffffff;
7172 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7173
7174 /* Disable fast lock */
7175 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7176
7177 /* Set idtafcrecal before PLL is enabled */
7178 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7179 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7180 mdiv |= ((bestn << DPIO_N_SHIFT));
7181 mdiv |= (1 << DPIO_K_SHIFT);
7182
7183 /*
7184 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7185 * but we don't support that).
7186 * Note: don't use the DAC post divider as it seems unstable.
7187 */
7188 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7189 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7190
7191 mdiv |= DPIO_ENABLE_CALIBRATION;
7192 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7193
7194 /* Set HBR and RBR LPF coefficients */
7195 if (pipe_config->port_clock == 162000 ||
7196 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7197 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7198 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7199 0x009f0003);
7200 else
7201 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7202 0x00d0000f);
7203
7204 if (intel_crtc_has_dp_encoder(pipe_config)) {
7205 /* Use SSC source */
7206 if (pipe == PIPE_A)
7207 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7208 0x0df40000);
7209 else
7210 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7211 0x0df70000);
7212 } else { /* HDMI or VGA */
7213 /* Use bend source */
7214 if (pipe == PIPE_A)
7215 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7216 0x0df70000);
7217 else
7218 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7219 0x0df40000);
7220 }
7221
7222 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7223 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7224 if (intel_crtc_has_dp_encoder(pipe_config))
7225 coreclk |= 0x01000000;
7226 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7227
7228 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7229 mutex_unlock(&dev_priv->sb_lock);
7230 }
7231
7232 static void chv_prepare_pll(struct intel_crtc *crtc,
7233 const struct intel_crtc_state *pipe_config)
7234 {
7235 struct drm_device *dev = crtc->base.dev;
7236 struct drm_i915_private *dev_priv = to_i915(dev);
7237 enum pipe pipe = crtc->pipe;
7238 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7239 u32 loopfilter, tribuf_calcntr;
7240 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7241 u32 dpio_val;
7242 int vco;
7243
7244 /* Enable Refclk and SSC */
7245 I915_WRITE(DPLL(pipe),
7246 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7247
7248 /* No need to actually set up the DPLL with DSI */
7249 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7250 return;
7251
7252 bestn = pipe_config->dpll.n;
7253 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7254 bestm1 = pipe_config->dpll.m1;
7255 bestm2 = pipe_config->dpll.m2 >> 22;
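	/*
	 * dpll.m2 is handled as a fixed-point value with 22 fractional
	 * bits: e.g. (illustrative) m2 = 0xa00000 splits into integer
	 * part 2 (m2 >> 22) and fraction 0x200000 (m2 & 0x3fffff),
	 * i.e. 2.5.
	 */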
7256 bestp1 = pipe_config->dpll.p1;
7257 bestp2 = pipe_config->dpll.p2;
7258 vco = pipe_config->dpll.vco;
7259 dpio_val = 0;
7260 loopfilter = 0;
7261
7262 mutex_lock(&dev_priv->sb_lock);
7263
7264 /* p1 and p2 divider */
7265 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7266 5 << DPIO_CHV_S1_DIV_SHIFT |
7267 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7268 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7269 1 << DPIO_CHV_K_DIV_SHIFT);
7270
7271 /* Feedback post-divider - m2 */
7272 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7273
7274 /* Feedback refclk divider - n and m1 */
7275 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7276 DPIO_CHV_M1_DIV_BY_2 |
7277 1 << DPIO_CHV_N_DIV_SHIFT);
7278
7279 /* M2 fraction division */
7280 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7281
7282 /* M2 fraction division enable */
7283 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7284 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7285 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7286 if (bestm2_frac)
7287 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7288 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7289
7290 /* Program digital lock detect threshold */
7291 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7292 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7293 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7294 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7295 if (!bestm2_frac)
7296 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7297 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7298
7299 /* Loop filter */
7300 if (vco == 5400000) {
7301 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7302 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7303 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7304 tribuf_calcntr = 0x9;
7305 } else if (vco <= 6200000) {
7306 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7307 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7308 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7309 tribuf_calcntr = 0x9;
7310 } else if (vco <= 6480000) {
7311 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7312 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7313 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7314 tribuf_calcntr = 0x8;
7315 } else {
7316 /* Not supported. Apply the same limits as in the max case */
7317 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7318 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7319 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7320 tribuf_calcntr = 0;
7321 }
7322 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7323
7324 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7325 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7326 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7327 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7328
7329 /* AFC Recal */
7330 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7331 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7332 DPIO_AFC_RECAL);
7333
7334 mutex_unlock(&dev_priv->sb_lock);
7335 }
7336
7337 /**
7338 * vlv_force_pll_on - forcibly enable just the PLL
7339 * @dev_priv: i915 private structure
7340 * @pipe: pipe PLL to enable
7341 * @dpll: PLL configuration
7342 *
7343 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7344 * in cases where we need the PLL enabled even when @pipe is not going to
7345 * be enabled.
7346 */
7347 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7348 const struct dpll *dpll)
7349 {
7350 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7351 struct intel_crtc_state *pipe_config;
7352
7353 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7354 if (!pipe_config)
7355 return -ENOMEM;
7356
7357 pipe_config->base.crtc = &crtc->base;
7358 pipe_config->pixel_multiplier = 1;
7359 pipe_config->dpll = *dpll;
7360
7361 if (IS_CHERRYVIEW(dev_priv)) {
7362 chv_compute_dpll(crtc, pipe_config);
7363 chv_prepare_pll(crtc, pipe_config);
7364 chv_enable_pll(crtc, pipe_config);
7365 } else {
7366 vlv_compute_dpll(crtc, pipe_config);
7367 vlv_prepare_pll(crtc, pipe_config);
7368 vlv_enable_pll(crtc, pipe_config);
7369 }
7370
7371 kfree(pipe_config);
7372
7373 return 0;
7374 }
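/*
 * Usage sketch (illustrative only; the @dpll values would come from a
 * prior *_find_best_dpll()-style computation, not be hand-rolled):
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &dpll) == 0) {
 *		... poke at hardware that needs the PLL running ...
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *	}
 */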
7375
7376 /**
7377 * vlv_force_pll_off - forcibly disable just the PLL
7378 * @dev_priv: i915 private structure
7379 * @pipe: pipe PLL to disable
7380 *
7381 * Disable the PLL for @pipe. To be used in cases where we need
7382 * the PLL enabled even when @pipe is not going to be enabled.
7383 */
7384 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7385 {
7386 if (IS_CHERRYVIEW(dev_priv))
7387 chv_disable_pll(dev_priv, pipe);
7388 else
7389 vlv_disable_pll(dev_priv, pipe);
7390 }
7391
7392 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7393 struct intel_crtc_state *crtc_state,
7394 struct dpll *reduced_clock)
7395 {
7396 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7397 u32 dpll;
7398 struct dpll *clock = &crtc_state->dpll;
7399
7400 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7401
7402 dpll = DPLL_VGA_MODE_DIS;
7403
7404 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7405 dpll |= DPLLB_MODE_LVDS;
7406 else
7407 dpll |= DPLLB_MODE_DAC_SERIAL;
7408
7409 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7410 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7411 dpll |= (crtc_state->pixel_multiplier - 1)
7412 << SDVO_MULTIPLIER_SHIFT_HIRES;
7413 }
7414
7415 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7416 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7417 dpll |= DPLL_SDVO_HIGH_SPEED;
7418
7419 if (intel_crtc_has_dp_encoder(crtc_state))
7420 dpll |= DPLL_SDVO_HIGH_SPEED;
7421
7422 /* compute bitmask from p1 value */
7423 if (IS_PINEVIEW(dev_priv))
7424 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7425 else {
7426 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7427 if (IS_G4X(dev_priv) && reduced_clock)
7428 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7429 }
7430 switch (clock->p2) {
7431 case 5:
7432 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7433 break;
7434 case 7:
7435 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7436 break;
7437 case 10:
7438 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7439 break;
7440 case 14:
7441 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7442 break;
7443 }
7444 if (INTEL_GEN(dev_priv) >= 4)
7445 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7446
7447 if (crtc_state->sdvo_tv_clock)
7448 dpll |= PLL_REF_INPUT_TVCLKINBC;
7449 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7450 intel_panel_use_ssc(dev_priv))
7451 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7452 else
7453 dpll |= PLL_REF_INPUT_DREFCLK;
7454
7455 dpll |= DPLL_VCO_ENABLE;
7456 crtc_state->dpll_hw_state.dpll = dpll;
7457
7458 if (INTEL_GEN(dev_priv) >= 4) {
7459 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7460 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7461 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7462 }
7463 }
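/*
 * Example of the p1 bitmask encoding above (illustrative): p1 == 3
 * yields (1 << 2) << DPLL_FPA01_P1_POST_DIV_SHIFT, i.e. the post
 * divider is programmed one-hot rather than as a binary value.
 */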
7464
7465 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7466 struct intel_crtc_state *crtc_state,
7467 struct dpll *reduced_clock)
7468 {
7469 struct drm_device *dev = crtc->base.dev;
7470 struct drm_i915_private *dev_priv = to_i915(dev);
7471 u32 dpll;
7472 struct dpll *clock = &crtc_state->dpll;
7473
7474 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7475
7476 dpll = DPLL_VGA_MODE_DIS;
7477
7478 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7479 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7480 } else {
7481 if (clock->p1 == 2)
7482 dpll |= PLL_P1_DIVIDE_BY_TWO;
7483 else
7484 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7485 if (clock->p2 == 4)
7486 dpll |= PLL_P2_DIVIDE_BY_4;
7487 }
7488
7489 if (!IS_I830(dev_priv) &&
7490 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7491 dpll |= DPLL_DVO_2X_MODE;
7492
7493 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7494 intel_panel_use_ssc(dev_priv))
7495 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7496 else
7497 dpll |= PLL_REF_INPUT_DREFCLK;
7498
7499 dpll |= DPLL_VCO_ENABLE;
7500 crtc_state->dpll_hw_state.dpll = dpll;
7501 }
7502
7503 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7504 {
7505 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7506 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7507 enum pipe pipe = crtc->pipe;
7508 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7509 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7510 u32 crtc_vtotal, crtc_vblank_end;
7511 int vsyncshift = 0;
7512
7513 /* We need to be careful not to change the adjusted mode, for otherwise
7514 * the hw state checker will get angry at the mismatch. */
7515 crtc_vtotal = adjusted_mode->crtc_vtotal;
7516 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7517
7518 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7519 /* the chip adds 2 halflines automatically */
7520 crtc_vtotal -= 1;
7521 crtc_vblank_end -= 1;
7522
7523 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7524 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7525 else
7526 vsyncshift = adjusted_mode->crtc_hsync_start -
7527 adjusted_mode->crtc_htotal / 2;
7528 if (vsyncshift < 0)
7529 vsyncshift += adjusted_mode->crtc_htotal;
7530 }
7531
7532 if (INTEL_GEN(dev_priv) > 3)
7533 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7534
7535 I915_WRITE(HTOTAL(cpu_transcoder),
7536 (adjusted_mode->crtc_hdisplay - 1) |
7537 ((adjusted_mode->crtc_htotal - 1) << 16));
7538 I915_WRITE(HBLANK(cpu_transcoder),
7539 (adjusted_mode->crtc_hblank_start - 1) |
7540 ((adjusted_mode->crtc_hblank_end - 1) << 16));
7541 I915_WRITE(HSYNC(cpu_transcoder),
7542 (adjusted_mode->crtc_hsync_start - 1) |
7543 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7544
7545 I915_WRITE(VTOTAL(cpu_transcoder),
7546 (adjusted_mode->crtc_vdisplay - 1) |
7547 ((crtc_vtotal - 1) << 16));
7548 I915_WRITE(VBLANK(cpu_transcoder),
7549 (adjusted_mode->crtc_vblank_start - 1) |
7550 ((crtc_vblank_end - 1) << 16));
7551 I915_WRITE(VSYNC(cpu_transcoder),
7552 (adjusted_mode->crtc_vsync_start - 1) |
7553 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7554
7555 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7556 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7557 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7558 * bits. */
7559 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
7560 (pipe == PIPE_B || pipe == PIPE_C))
7561 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7562
7563 }
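/*
 * Packing sketch (illustrative numbers): a 1920x1080 mode with an
 * htotal of 2200 programs HTOTAL with (1920 - 1) | ((2200 - 1) << 16);
 * every timing field is written minus one, active/start in the low
 * word and total/end in the high word, and intel_get_pipe_timings()
 * adds the ones back on readback.
 */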
7564
7565 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7566 {
7567 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7568 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7569 enum pipe pipe = crtc->pipe;
7570
7571 /* pipesrc controls the size that is scaled from, which should
7572 * always be the user's requested size.
7573 */
7574 I915_WRITE(PIPESRC(pipe),
7575 ((crtc_state->pipe_src_w - 1) << 16) |
7576 (crtc_state->pipe_src_h - 1));
7577 }
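/* E.g. (illustrative) a 1920x1080 source is written as
 * ((1920 - 1) << 16) | (1080 - 1), width in the high word. */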
7578
7579 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7580 struct intel_crtc_state *pipe_config)
7581 {
7582 struct drm_device *dev = crtc->base.dev;
7583 struct drm_i915_private *dev_priv = to_i915(dev);
7584 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7585 u32 tmp;
7586
7587 tmp = I915_READ(HTOTAL(cpu_transcoder));
7588 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7589 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7590 tmp = I915_READ(HBLANK(cpu_transcoder));
7591 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7592 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7593 tmp = I915_READ(HSYNC(cpu_transcoder));
7594 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7595 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7596
7597 tmp = I915_READ(VTOTAL(cpu_transcoder));
7598 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7599 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7600 tmp = I915_READ(VBLANK(cpu_transcoder));
7601 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7602 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7603 tmp = I915_READ(VSYNC(cpu_transcoder));
7604 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7605 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7606
7607 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7608 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7609 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7610 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7611 }
7612 }
7613
7614 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7615 struct intel_crtc_state *pipe_config)
7616 {
7617 struct drm_device *dev = crtc->base.dev;
7618 struct drm_i915_private *dev_priv = to_i915(dev);
7619 u32 tmp;
7620
7621 tmp = I915_READ(PIPESRC(crtc->pipe));
7622 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7623 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7624
7625 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7626 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7627 }
7628
7629 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7630 struct intel_crtc_state *pipe_config)
7631 {
7632 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7633 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7634 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7635 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7636
7637 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7638 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7639 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7640 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7641
7642 mode->flags = pipe_config->base.adjusted_mode.flags;
7643 mode->type = DRM_MODE_TYPE_DRIVER;
7644
7645 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7646
7647 mode->hsync = drm_mode_hsync(mode);
7648 mode->vrefresh = drm_mode_vrefresh(mode);
7649 drm_mode_set_name(mode);
7650 }
7651
7652 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
7653 {
7654 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7655 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7656 u32 pipeconf;
7657
7658 pipeconf = 0;
7659
7660 /* we keep both pipes enabled on 830 */
7661 if (IS_I830(dev_priv))
7662 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
7663
7664 if (crtc_state->double_wide)
7665 pipeconf |= PIPECONF_DOUBLE_WIDE;
7666
7667 /* only g4x and later have fancy bpc/dither controls */
7668 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7669 IS_CHERRYVIEW(dev_priv)) {
7670 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7671 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
7672 pipeconf |= PIPECONF_DITHER_EN |
7673 PIPECONF_DITHER_TYPE_SP;
7674
7675 switch (crtc_state->pipe_bpp) {
7676 case 18:
7677 pipeconf |= PIPECONF_6BPC;
7678 break;
7679 case 24:
7680 pipeconf |= PIPECONF_8BPC;
7681 break;
7682 case 30:
7683 pipeconf |= PIPECONF_10BPC;
7684 break;
7685 default:
7686 /* Case prevented by intel_choose_pipe_bpp_dither. */
7687 BUG();
7688 }
7689 }
7690
7691 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7692 if (INTEL_GEN(dev_priv) < 4 ||
7693 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7694 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7695 else
7696 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7697 } else
7698 pipeconf |= PIPECONF_PROGRESSIVE;
7699
7700 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7701 crtc_state->limited_color_range)
7702 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7703
7704 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7705 POSTING_READ(PIPECONF(crtc->pipe));
7706 }
7707
7708 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7709 struct intel_crtc_state *crtc_state)
7710 {
7711 struct drm_device *dev = crtc->base.dev;
7712 struct drm_i915_private *dev_priv = to_i915(dev);
7713 const struct intel_limit *limit;
7714 int refclk = 48000;
7715
7716 memset(&crtc_state->dpll_hw_state, 0,
7717 sizeof(crtc_state->dpll_hw_state));
7718
7719 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7720 if (intel_panel_use_ssc(dev_priv)) {
7721 refclk = dev_priv->vbt.lvds_ssc_freq;
7722 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7723 }
7724
7725 limit = &intel_limits_i8xx_lvds;
7726 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7727 limit = &intel_limits_i8xx_dvo;
7728 } else {
7729 limit = &intel_limits_i8xx_dac;
7730 }
7731
7732 if (!crtc_state->clock_set &&
7733 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7734 refclk, NULL, &crtc_state->dpll)) {
7735 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7736 return -EINVAL;
7737 }
7738
7739 i8xx_compute_dpll(crtc, crtc_state, NULL);
7740
7741 return 0;
7742 }
7743
7744 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7745 struct intel_crtc_state *crtc_state)
7746 {
7747 struct drm_device *dev = crtc->base.dev;
7748 struct drm_i915_private *dev_priv = to_i915(dev);
7749 const struct intel_limit *limit;
7750 int refclk = 96000;
7751
7752 memset(&crtc_state->dpll_hw_state, 0,
7753 sizeof(crtc_state->dpll_hw_state));
7754
7755 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7756 if (intel_panel_use_ssc(dev_priv)) {
7757 refclk = dev_priv->vbt.lvds_ssc_freq;
7758 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7759 }
7760
7761 if (intel_is_dual_link_lvds(dev))
7762 limit = &intel_limits_g4x_dual_channel_lvds;
7763 else
7764 limit = &intel_limits_g4x_single_channel_lvds;
7765 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7766 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7767 limit = &intel_limits_g4x_hdmi;
7768 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7769 limit = &intel_limits_g4x_sdvo;
7770 } else {
7771 /* Fall back to the generic i9xx limits for any other outputs */
7772 limit = &intel_limits_i9xx_sdvo;
7773 }
7774
7775 if (!crtc_state->clock_set &&
7776 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7777 refclk, NULL, &crtc_state->dpll)) {
7778 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7779 return -EINVAL;
7780 }
7781
7782 i9xx_compute_dpll(crtc, crtc_state, NULL);
7783
7784 return 0;
7785 }
7786
7787 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7788 struct intel_crtc_state *crtc_state)
7789 {
7790 struct drm_device *dev = crtc->base.dev;
7791 struct drm_i915_private *dev_priv = to_i915(dev);
7792 const struct intel_limit *limit;
7793 int refclk = 96000;
7794
7795 memset(&crtc_state->dpll_hw_state, 0,
7796 sizeof(crtc_state->dpll_hw_state));
7797
7798 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7799 if (intel_panel_use_ssc(dev_priv)) {
7800 refclk = dev_priv->vbt.lvds_ssc_freq;
7801 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7802 }
7803
7804 limit = &intel_limits_pineview_lvds;
7805 } else {
7806 limit = &intel_limits_pineview_sdvo;
7807 }
7808
7809 if (!crtc_state->clock_set &&
7810 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7811 refclk, NULL, &crtc_state->dpll)) {
7812 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7813 return -EINVAL;
7814 }
7815
7816 i9xx_compute_dpll(crtc, crtc_state, NULL);
7817
7818 return 0;
7819 }
7820
7821 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7822 struct intel_crtc_state *crtc_state)
7823 {
7824 struct drm_device *dev = crtc->base.dev;
7825 struct drm_i915_private *dev_priv = to_i915(dev);
7826 const struct intel_limit *limit;
7827 int refclk = 96000;
7828
7829 memset(&crtc_state->dpll_hw_state, 0,
7830 sizeof(crtc_state->dpll_hw_state));
7831
7832 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7833 if (intel_panel_use_ssc(dev_priv)) {
7834 refclk = dev_priv->vbt.lvds_ssc_freq;
7835 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7836 }
7837
7838 limit = &intel_limits_i9xx_lvds;
7839 } else {
7840 limit = &intel_limits_i9xx_sdvo;
7841 }
7842
7843 if (!crtc_state->clock_set &&
7844 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7845 refclk, NULL, &crtc_state->dpll)) {
7846 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7847 return -EINVAL;
7848 }
7849
7850 i9xx_compute_dpll(crtc, crtc_state, NULL);
7851
7852 return 0;
7853 }
7854
7855 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7856 struct intel_crtc_state *crtc_state)
7857 {
7858 int refclk = 100000;
7859 const struct intel_limit *limit = &intel_limits_chv;
7860
7861 memset(&crtc_state->dpll_hw_state, 0,
7862 sizeof(crtc_state->dpll_hw_state));
7863
7864 if (!crtc_state->clock_set &&
7865 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7866 refclk, NULL, &crtc_state->dpll)) {
7867 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7868 return -EINVAL;
7869 }
7870
7871 chv_compute_dpll(crtc, crtc_state);
7872
7873 return 0;
7874 }
7875
7876 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7877 struct intel_crtc_state *crtc_state)
7878 {
7879 int refclk = 100000;
7880 const struct intel_limit *limit = &intel_limits_vlv;
7881
7882 memset(&crtc_state->dpll_hw_state, 0,
7883 sizeof(crtc_state->dpll_hw_state));
7884
7885 if (!crtc_state->clock_set &&
7886 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7887 refclk, NULL, &crtc_state->dpll)) {
7888 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7889 return -EINVAL;
7890 }
7891
7892 vlv_compute_dpll(crtc, crtc_state);
7893
7894 return 0;
7895 }
7896
7897 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7898 struct intel_crtc_state *pipe_config)
7899 {
7900 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7901 u32 tmp;
7902
7903 if (INTEL_GEN(dev_priv) <= 3 &&
7904 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
7905 return;
7906
7907 tmp = I915_READ(PFIT_CONTROL);
7908 if (!(tmp & PFIT_ENABLE))
7909 return;
7910
7911 /* Check whether the pfit is attached to our pipe. */
7912 if (INTEL_GEN(dev_priv) < 4) {
7913 if (crtc->pipe != PIPE_B)
7914 return;
7915 } else {
7916 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7917 return;
7918 }
7919
7920 pipe_config->gmch_pfit.control = tmp;
7921 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7922 }
7923
7924 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
7925 struct intel_crtc_state *pipe_config)
7926 {
7927 struct drm_device *dev = crtc->base.dev;
7928 struct drm_i915_private *dev_priv = to_i915(dev);
7929 int pipe = pipe_config->cpu_transcoder;
7930 struct dpll clock;
7931 u32 mdiv;
7932 int refclk = 100000;
7933
7934 /* In case of DSI, DPLL will not be used */
7935 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7936 return;
7937
7938 mutex_lock(&dev_priv->sb_lock);
7939 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
7940 mutex_unlock(&dev_priv->sb_lock);
7941
7942 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
7943 clock.m2 = mdiv & DPIO_M2DIV_MASK;
7944 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
7945 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
7946 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
7947
7948 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
7949 }
7950
7951 static void
7952 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
7953 struct intel_initial_plane_config *plane_config)
7954 {
7955 struct drm_device *dev = crtc->base.dev;
7956 struct drm_i915_private *dev_priv = to_i915(dev);
7957 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
7958 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
7959 enum pipe pipe;
7960 u32 val, base, offset;
7961 int fourcc, pixel_format;
7962 unsigned int aligned_height;
7963 struct drm_framebuffer *fb;
7964 struct intel_framebuffer *intel_fb;
7965
7966 if (!plane->get_hw_state(plane, &pipe))
7967 return;
7968
7969 WARN_ON(pipe != crtc->pipe);
7970
7971 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7972 if (!intel_fb) {
7973 DRM_DEBUG_KMS("failed to alloc fb\n");
7974 return;
7975 }
7976
7977 fb = &intel_fb->base;
7978
7979 fb->dev = dev;
7980
7981 val = I915_READ(DSPCNTR(i9xx_plane));
7982
7983 if (INTEL_GEN(dev_priv) >= 4) {
7984 if (val & DISPPLANE_TILED) {
7985 plane_config->tiling = I915_TILING_X;
7986 fb->modifier = I915_FORMAT_MOD_X_TILED;
7987 }
7988
7989 if (val & DISPPLANE_ROTATE_180)
7990 plane_config->rotation = DRM_MODE_ROTATE_180;
7991 }
7992
7993 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
7994 val & DISPPLANE_MIRROR)
7995 plane_config->rotation |= DRM_MODE_REFLECT_X;
7996
7997 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7998 fourcc = i9xx_format_to_fourcc(pixel_format);
7999 fb->format = drm_format_info(fourcc);
8000
8001 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8002 offset = I915_READ(DSPOFFSET(i9xx_plane));
8003 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8004 } else if (INTEL_GEN(dev_priv) >= 4) {
8005 if (plane_config->tiling)
8006 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8007 else
8008 offset = I915_READ(DSPLINOFF(i9xx_plane));
8009 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8010 } else {
8011 base = I915_READ(DSPADDR(i9xx_plane));
8012 }
8013 plane_config->base = base;
8014
8015 val = I915_READ(PIPESRC(pipe));
8016 fb->width = ((val >> 16) & 0xfff) + 1;
8017 fb->height = ((val >> 0) & 0xfff) + 1;
8018
8019 val = I915_READ(DSPSTRIDE(i9xx_plane));
8020 fb->pitches[0] = val & 0xffffffc0;
8021
8022 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8023
8024 plane_config->size = fb->pitches[0] * aligned_height;
8025
8026 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8027 crtc->base.name, plane->base.name, fb->width, fb->height,
8028 fb->format->cpp[0] * 8, base, fb->pitches[0],
8029 plane_config->size);
8030
8031 plane_config->fb = intel_fb;
8032 }
8033
8034 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8035 struct intel_crtc_state *pipe_config)
8036 {
8037 struct drm_device *dev = crtc->base.dev;
8038 struct drm_i915_private *dev_priv = to_i915(dev);
8039 int pipe = pipe_config->cpu_transcoder;
8040 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8041 struct dpll clock;
8042 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8043 int refclk = 100000;
8044
8045 /* In case of DSI, DPLL will not be used */
8046 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8047 return;
8048
8049 mutex_lock(&dev_priv->sb_lock);
8050 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8051 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8052 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8053 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8054 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8055 mutex_unlock(&dev_priv->sb_lock);
8056
8057 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8058 clock.m2 = (pll_dw0 & 0xff) << 22;
8059 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8060 clock.m2 |= pll_dw2 & 0x3fffff;
8061 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8062 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8063 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8064
8065 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8066 }
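/*
 * This reverses the fixed-point packing done in chv_prepare_pll(): the
 * integer part of m2 comes back via (pll_dw0 & 0xff) << 22 and the 22
 * fractional bits via pll_dw2, so e.g. (illustrative) integer 2 plus
 * fraction 0x200000 reassembles to m2 = 0xa00000, i.e. 2.5.
 */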
8067
8068 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8069 struct intel_crtc_state *pipe_config)
8070 {
8071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8072 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8073
8074 pipe_config->lspcon_downsampling = false;
8075
8076 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8077 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8078
8079 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8080 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8081 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8082
8083 if (ycbcr420_enabled) {
8084 /* We support 4:2:0 in full blend mode only */
8085 if (!blend)
8086 output = INTEL_OUTPUT_FORMAT_INVALID;
8087 else if (!(IS_GEMINILAKE(dev_priv) ||
8088 INTEL_GEN(dev_priv) >= 10))
8089 output = INTEL_OUTPUT_FORMAT_INVALID;
8090 else
8091 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8092 } else {
8093 /*
8094 * Currently there is no interface defined for
8095 * the user to choose between RGB/YCBCR444
8096 * and YCBCR420. So the only case where
8097 * YCBCR444 is used is when driving YCBCR420
8098 * output through LSPCON: the pipe is
8099 * configured for YCBCR444 output and LSPCON
8100 * takes care of downsampling it.
8101 */
8102 pipe_config->lspcon_downsampling = true;
8103 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8104 }
8105 }
8106 }
8107
8108 pipe_config->output_format = output;
8109 }
8110
8111 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8112 struct intel_crtc_state *pipe_config)
8113 {
8114 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8115 enum intel_display_power_domain power_domain;
8116 intel_wakeref_t wakeref;
8117 u32 tmp;
8118 bool ret;
8119
8120 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8121 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8122 if (!wakeref)
8123 return false;
8124
8125 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8126 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8127 pipe_config->shared_dpll = NULL;
8128
8129 ret = false;
8130
8131 tmp = I915_READ(PIPECONF(crtc->pipe));
8132 if (!(tmp & PIPECONF_ENABLE))
8133 goto out;
8134
8135 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8136 IS_CHERRYVIEW(dev_priv)) {
8137 switch (tmp & PIPECONF_BPC_MASK) {
8138 case PIPECONF_6BPC:
8139 pipe_config->pipe_bpp = 18;
8140 break;
8141 case PIPECONF_8BPC:
8142 pipe_config->pipe_bpp = 24;
8143 break;
8144 case PIPECONF_10BPC:
8145 pipe_config->pipe_bpp = 30;
8146 break;
8147 default:
8148 break;
8149 }
8150 }
8151
8152 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8153 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8154 pipe_config->limited_color_range = true;
8155
8156 if (INTEL_GEN(dev_priv) < 4)
8157 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8158
8159 intel_get_pipe_timings(crtc, pipe_config);
8160 intel_get_pipe_src_size(crtc, pipe_config);
8161
8162 i9xx_get_pfit_config(crtc, pipe_config);
8163
8164 if (INTEL_GEN(dev_priv) >= 4) {
8165 /* No way to read it out on pipes B and C */
8166 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8167 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8168 else
8169 tmp = I915_READ(DPLL_MD(crtc->pipe));
8170 pipe_config->pixel_multiplier =
8171 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8172 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8173 pipe_config->dpll_hw_state.dpll_md = tmp;
8174 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8175 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8176 tmp = I915_READ(DPLL(crtc->pipe));
8177 pipe_config->pixel_multiplier =
8178 ((tmp & SDVO_MULTIPLIER_MASK)
8179 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8180 } else {
8181 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8182 * port and will be fixed up in the encoder->get_config
8183 * function. */
8184 pipe_config->pixel_multiplier = 1;
8185 }
8186 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8187 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8188 /*
8189 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8190 * on 830. Filter it out here so that we don't
8191 * report errors due to that.
8192 */
8193 if (IS_I830(dev_priv))
8194 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8195
8196 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8197 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8198 } else {
8199 /* Mask out read-only status bits. */
8200 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8201 DPLL_PORTC_READY_MASK |
8202 DPLL_PORTB_READY_MASK);
8203 }
8204
8205 if (IS_CHERRYVIEW(dev_priv))
8206 chv_crtc_clock_get(crtc, pipe_config);
8207 else if (IS_VALLEYVIEW(dev_priv))
8208 vlv_crtc_clock_get(crtc, pipe_config);
8209 else
8210 i9xx_crtc_clock_get(crtc, pipe_config);
8211
8212 /*
8213 * Normally the dotclock is filled in by the encoder .get_config()
8214 * but in case the pipe is enabled w/o any ports we need a sane
8215 * default.
8216 */
8217 pipe_config->base.adjusted_mode.crtc_clock =
8218 pipe_config->port_clock / pipe_config->pixel_multiplier;
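	/* E.g. (illustrative) a 108000 kHz port clock with a 4x pixel
	 * multiplier yields a 27000 kHz dotclock. */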
8219
8220 ret = true;
8221
8222 out:
8223 intel_display_power_put(dev_priv, power_domain, wakeref);
8224
8225 return ret;
8226 }
8227
8228 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8229 {
8230 struct intel_encoder *encoder;
8231 int i;
8232 u32 val, final;
8233 bool has_lvds = false;
8234 bool has_cpu_edp = false;
8235 bool has_panel = false;
8236 bool has_ck505 = false;
8237 bool can_ssc = false;
8238 bool using_ssc_source = false;
8239
8240 /* We need to take the global config into account */
8241 for_each_intel_encoder(&dev_priv->drm, encoder) {
8242 switch (encoder->type) {
8243 case INTEL_OUTPUT_LVDS:
8244 has_panel = true;
8245 has_lvds = true;
8246 break;
8247 case INTEL_OUTPUT_EDP:
8248 has_panel = true;
8249 if (encoder->port == PORT_A)
8250 has_cpu_edp = true;
8251 break;
8252 default:
8253 break;
8254 }
8255 }
8256
8257 if (HAS_PCH_IBX(dev_priv)) {
8258 has_ck505 = dev_priv->vbt.display_clock_mode;
8259 can_ssc = has_ck505;
8260 } else {
8261 has_ck505 = false;
8262 can_ssc = true;
8263 }
8264
8265 /* Check if any DPLLs are using the SSC source */
8266 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8267 u32 temp = I915_READ(PCH_DPLL(i));
8268
8269 if (!(temp & DPLL_VCO_ENABLE))
8270 continue;
8271
8272 if ((temp & PLL_REF_INPUT_MASK) ==
8273 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8274 using_ssc_source = true;
8275 break;
8276 }
8277 }
8278
8279 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8280 has_panel, has_lvds, has_ck505, using_ssc_source);
8281
8282 /* Ironlake: try to set up the display reference clock before
8283 * enabling the DPLL. This is only under the driver's control
8284 * after PCH B stepping; earlier chipset steppings ignore this
8285 * setting.
8286 */
8287 val = I915_READ(PCH_DREF_CONTROL);
8288
8289 /* As we must carefully and slowly disable/enable each source in turn,
8290 * compute the final state we want first and check if we need to
8291 * make any changes at all.
8292 */
8293 final = val;
8294 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8295 if (has_ck505)
8296 final |= DREF_NONSPREAD_CK505_ENABLE;
8297 else
8298 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8299
8300 final &= ~DREF_SSC_SOURCE_MASK;
8301 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8302 final &= ~DREF_SSC1_ENABLE;
8303
8304 if (has_panel) {
8305 final |= DREF_SSC_SOURCE_ENABLE;
8306
8307 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8308 final |= DREF_SSC1_ENABLE;
8309
8310 if (has_cpu_edp) {
8311 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8312 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8313 else
8314 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8315 } else
8316 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8317 } else if (using_ssc_source) {
8318 final |= DREF_SSC_SOURCE_ENABLE;
8319 final |= DREF_SSC1_ENABLE;
8320 }
8321
8322 if (final == val)
8323 return;
8324
8325 /* Always enable nonspread source */
8326 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8327
8328 if (has_ck505)
8329 val |= DREF_NONSPREAD_CK505_ENABLE;
8330 else
8331 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8332
8333 if (has_panel) {
8334 val &= ~DREF_SSC_SOURCE_MASK;
8335 val |= DREF_SSC_SOURCE_ENABLE;
8336
8337 /* SSC must be turned on before enabling the CPU output */
8338 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8339 DRM_DEBUG_KMS("Using SSC on panel\n");
8340 val |= DREF_SSC1_ENABLE;
8341 } else
8342 val &= ~DREF_SSC1_ENABLE;
8343
8344 /* Get SSC going before enabling the outputs */
8345 I915_WRITE(PCH_DREF_CONTROL, val);
8346 POSTING_READ(PCH_DREF_CONTROL);
8347 udelay(200);
8348
8349 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8350
8351 /* Enable CPU source on CPU attached eDP */
8352 if (has_cpu_edp) {
8353 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8354 DRM_DEBUG_KMS("Using SSC on eDP\n");
8355 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8356 } else
8357 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8358 } else
8359 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8360
8361 I915_WRITE(PCH_DREF_CONTROL, val);
8362 POSTING_READ(PCH_DREF_CONTROL);
8363 udelay(200);
8364 } else {
8365 DRM_DEBUG_KMS("Disabling CPU source output\n");
8366
8367 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8368
8369 /* Turn off CPU output */
8370 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8371
8372 I915_WRITE(PCH_DREF_CONTROL, val);
8373 POSTING_READ(PCH_DREF_CONTROL);
8374 udelay(200);
8375
8376 if (!using_ssc_source) {
8377 DRM_DEBUG_KMS("Disabling SSC source\n");
8378
8379 /* Turn off the SSC source */
8380 val &= ~DREF_SSC_SOURCE_MASK;
8381 val |= DREF_SSC_SOURCE_DISABLE;
8382
8383 /* Turn off SSC1 */
8384 val &= ~DREF_SSC1_ENABLE;
8385
8386 I915_WRITE(PCH_DREF_CONTROL, val);
8387 POSTING_READ(PCH_DREF_CONTROL);
8388 udelay(200);
8389 }
8390 }
8391
8392 BUG_ON(val != final);
8393 }
8394
8395 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8396 {
8397 u32 tmp;
8398
8399 tmp = I915_READ(SOUTH_CHICKEN2);
8400 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8401 I915_WRITE(SOUTH_CHICKEN2, tmp);
8402
8403 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8404 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8405 DRM_ERROR("FDI mPHY reset assert timeout\n");
8406
8407 tmp = I915_READ(SOUTH_CHICKEN2);
8408 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8409 I915_WRITE(SOUTH_CHICKEN2, tmp);
8410
8411 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8412 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8413 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8414 }
8415
8416 /* WaMPhyProgramming:hsw */
8417 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8418 {
8419 u32 tmp;
8420
8421 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8422 tmp &= ~(0xFF << 24);
8423 tmp |= (0x12 << 24);
8424 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8425
8426 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8427 tmp |= (1 << 11);
8428 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8429
8430 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8431 tmp |= (1 << 11);
8432 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8433
8434 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8435 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8436 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8437
8438 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8439 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8440 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8441
8442 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8443 tmp &= ~(7 << 13);
8444 tmp |= (5 << 13);
8445 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8446
8447 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8448 tmp &= ~(7 << 13);
8449 tmp |= (5 << 13);
8450 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8451
8452 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8453 tmp &= ~0xFF;
8454 tmp |= 0x1C;
8455 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8456
8457 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8458 tmp &= ~0xFF;
8459 tmp |= 0x1C;
8460 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8461
8462 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8463 tmp &= ~(0xFF << 16);
8464 tmp |= (0x1C << 16);
8465 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8466
8467 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8468 tmp &= ~(0xFF << 16);
8469 tmp |= (0x1C << 16);
8470 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8471
8472 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8473 tmp |= (1 << 27);
8474 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8475
8476 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8477 tmp |= (1 << 27);
8478 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8479
8480 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8481 tmp &= ~(0xF << 28);
8482 tmp |= (4 << 28);
8483 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8484
8485 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8486 tmp &= ~(0xF << 28);
8487 tmp |= (4 << 28);
8488 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8489 }
8490
8491 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8492 * Programming" based on the parameters passed:
8493 * - Sequence to enable CLKOUT_DP
8494 * - Sequence to enable CLKOUT_DP without spread
8495 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8496 */
8497 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
8498 bool with_spread, bool with_fdi)
8499 {
8500 u32 reg, tmp;
8501
8502 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8503 with_spread = true;
8504 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
8505 with_fdi, "LP PCH doesn't have FDI\n"))
8506 with_fdi = false;
8507
8508 mutex_lock(&dev_priv->sb_lock);
8509
8510 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8511 tmp &= ~SBI_SSCCTL_DISABLE;
8512 tmp |= SBI_SSCCTL_PATHALT;
8513 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8514
8515 udelay(24);
8516
8517 if (with_spread) {
8518 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8519 tmp &= ~SBI_SSCCTL_PATHALT;
8520 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8521
8522 if (with_fdi) {
8523 lpt_reset_fdi_mphy(dev_priv);
8524 lpt_program_fdi_mphy(dev_priv);
8525 }
8526 }
8527
8528 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
8529 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8530 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8531 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8532
8533 mutex_unlock(&dev_priv->sb_lock);
8534 }
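/*
 * The three sequences map onto the parameters like so (the first call
 * is the one actually made from lpt_init_pch_refclk() below; the
 * others are illustrative):
 *
 *	lpt_enable_clkout_dp(dev_priv, true, true);   /* spread + FDI *​/
 *	lpt_enable_clkout_dp(dev_priv, true, false);  /* spread, no FDI *​/
 *	lpt_enable_clkout_dp(dev_priv, false, false); /* no spread *​/
 */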
8535
8536 /* Sequence to disable CLKOUT_DP */
8537 static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
8538 {
8539 u32 reg, tmp;
8540
8541 mutex_lock(&dev_priv->sb_lock);
8542
8543 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
8544 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8545 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8546 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8547
8548 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8549 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8550 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8551 tmp |= SBI_SSCCTL_PATHALT;
8552 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8553 udelay(32);
8554 }
8555 tmp |= SBI_SSCCTL_DISABLE;
8556 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8557 }
8558
8559 mutex_unlock(&dev_priv->sb_lock);
8560 }
8561
8562 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8563
8564 static const u16 sscdivintphase[] = {
8565 [BEND_IDX( 50)] = 0x3B23,
8566 [BEND_IDX( 45)] = 0x3B23,
8567 [BEND_IDX( 40)] = 0x3C23,
8568 [BEND_IDX( 35)] = 0x3C23,
8569 [BEND_IDX( 30)] = 0x3D23,
8570 [BEND_IDX( 25)] = 0x3D23,
8571 [BEND_IDX( 20)] = 0x3E23,
8572 [BEND_IDX( 15)] = 0x3E23,
8573 [BEND_IDX( 10)] = 0x3F23,
8574 [BEND_IDX( 5)] = 0x3F23,
8575 [BEND_IDX( 0)] = 0x0025,
8576 [BEND_IDX( -5)] = 0x0025,
8577 [BEND_IDX(-10)] = 0x0125,
8578 [BEND_IDX(-15)] = 0x0125,
8579 [BEND_IDX(-20)] = 0x0225,
8580 [BEND_IDX(-25)] = 0x0225,
8581 [BEND_IDX(-30)] = 0x0325,
8582 [BEND_IDX(-35)] = 0x0325,
8583 [BEND_IDX(-40)] = 0x0425,
8584 [BEND_IDX(-45)] = 0x0425,
8585 [BEND_IDX(-50)] = 0x0525,
8586 };
8587
8588 /*
8589 * Bend CLKOUT_DP
8590 * steps -50 to 50 inclusive, in steps of 5
8591 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8592 * change in clock period = -(steps / 10) * 5.787 ps
8593 */
8594 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8595 {
8596 u32 tmp;
8597 int idx = BEND_IDX(steps);
8598
8599 if (WARN_ON(steps % 5 != 0))
8600 return;
8601
8602 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8603 return;
8604
8605 mutex_lock(&dev_priv->sb_lock);
8606
8607 if (steps % 10 != 0)
8608 tmp = 0xAAAAAAAB;
8609 else
8610 tmp = 0x00000000;
8611 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8612
8613 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8614 tmp &= 0xffff0000;
8615 tmp |= sscdivintphase[idx];
8616 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8617
8618 mutex_unlock(&dev_priv->sb_lock);
8619 }
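/*
 * Worked example (arithmetic only): steps = -25 gives
 * BEND_IDX(-25) = (50 - 25) / 5 = 5, selecting 0x0225 from
 * sscdivintphase[], and since -25 % 10 != 0 the 0xAAAAAAAB dither
 * value is used; per the formula above the change in clock period is
 * -(-25 / 10) * 5.787 ps ~= +14.5 ps.
 */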
8620
8621 #undef BEND_IDX
8622
8623 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8624 {
8625 struct intel_encoder *encoder;
8626 bool has_vga = false;
8627
8628 for_each_intel_encoder(&dev_priv->drm, encoder) {
8629 switch (encoder->type) {
8630 case INTEL_OUTPUT_ANALOG:
8631 has_vga = true;
8632 break;
8633 default:
8634 break;
8635 }
8636 }
8637
8638 if (has_vga) {
8639 lpt_bend_clkout_dp(dev_priv, 0);
8640 lpt_enable_clkout_dp(dev_priv, true, true);
8641 } else {
8642 lpt_disable_clkout_dp(dev_priv);
8643 }
8644 }
8645
8646 /*
8647 * Initialize reference clocks when the driver loads
8648 */
8649 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8650 {
8651 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8652 ironlake_init_pch_refclk(dev_priv);
8653 else if (HAS_PCH_LPT(dev_priv))
8654 lpt_init_pch_refclk(dev_priv);
8655 }
8656
8657 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8658 {
8659 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8660 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8661 enum pipe pipe = crtc->pipe;
8662 u32 val;
8663
8664 val = 0;
8665
8666 switch (crtc_state->pipe_bpp) {
8667 case 18:
8668 val |= PIPECONF_6BPC;
8669 break;
8670 case 24:
8671 val |= PIPECONF_8BPC;
8672 break;
8673 case 30:
8674 val |= PIPECONF_10BPC;
8675 break;
8676 case 36:
8677 val |= PIPECONF_12BPC;
8678 break;
8679 default:
8680 /* Case prevented by intel_choose_pipe_bpp_dither. */
8681 BUG();
8682 }
8683
8684 if (crtc_state->dither)
8685 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8686
8687 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8688 val |= PIPECONF_INTERLACED_ILK;
8689 else
8690 val |= PIPECONF_PROGRESSIVE;
8691
8692 if (crtc_state->limited_color_range)
8693 val |= PIPECONF_COLOR_RANGE_SELECT;
8694
8695 I915_WRITE(PIPECONF(pipe), val);
8696 POSTING_READ(PIPECONF(pipe));
8697 }
8698
8699 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8700 {
8701 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8702 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8703 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8704 u32 val = 0;
8705
8706 if (IS_HASWELL(dev_priv) && crtc_state->dither)
8707 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8708
8709 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8710 val |= PIPECONF_INTERLACED_ILK;
8711 else
8712 val |= PIPECONF_PROGRESSIVE;
8713
8714 I915_WRITE(PIPECONF(cpu_transcoder), val);
8715 POSTING_READ(PIPECONF(cpu_transcoder));
8716 }
8717
8718 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8719 {
8720 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8721 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8722
8723 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8724 u32 val = 0;
8725
8726 switch (crtc_state->pipe_bpp) {
8727 case 18:
8728 val |= PIPEMISC_DITHER_6_BPC;
8729 break;
8730 case 24:
8731 val |= PIPEMISC_DITHER_8_BPC;
8732 break;
8733 case 30:
8734 val |= PIPEMISC_DITHER_10_BPC;
8735 break;
8736 case 36:
8737 val |= PIPEMISC_DITHER_12_BPC;
8738 break;
8739 default:
8740 /* Case prevented by pipe_config_set_bpp. */
8741 BUG();
8742 }
8743
8744 if (crtc_state->dither)
8745 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8746
8747 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8748 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
8749 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8750
8751 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
8752 val |= PIPEMISC_YUV420_ENABLE |
8753 PIPEMISC_YUV420_MODE_FULL_BLEND;
8754
8755 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8756 }
8757 }
8758
8759 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8760 {
8761 /*
8762 * Account for spread spectrum to avoid
8763 * oversubscribing the link. Max center spread
8764 * is 2.5%; use 5% for safety's sake.
8765 */
8766 u32 bps = target_clock * bpp * 21 / 20;
8767 return DIV_ROUND_UP(bps, link_bw * 8);
8768 }
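/*
 * Worked example (illustrative numbers): a 148500 kHz stream at
 * 24 bpp over a 270000 kHz link gives bps = 148500 * 24 * 21 / 20 =
 * 3742200, and DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */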
8769
8770 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8771 {
8772 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8773 }
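/*
 * I.e. CB tuning is wanted when the effective feedback divider M (as
 * computed by i9xx_dpll_compute_m()) is less than `factor` times N;
 * with the default factor of 21 from ironlake_compute_dpll() below,
 * that is any M/N ratio under 21.
 */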
8774
8775 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8776 struct intel_crtc_state *crtc_state,
8777 struct dpll *reduced_clock)
8778 {
8779 struct drm_crtc *crtc = &intel_crtc->base;
8780 struct drm_device *dev = crtc->dev;
8781 struct drm_i915_private *dev_priv = to_i915(dev);
8782 u32 dpll, fp, fp2;
8783 int factor;
8784
8785 /* Enable autotuning of the PLL clock (if permissible) */
8786 factor = 21;
8787 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8788 if ((intel_panel_use_ssc(dev_priv) &&
8789 dev_priv->vbt.lvds_ssc_freq == 100000) ||
8790 (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
8791 factor = 25;
8792 } else if (crtc_state->sdvo_tv_clock)
8793 factor = 20;
8794
8795 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8796
8797 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8798 fp |= FP_CB_TUNE;
8799
8800 if (reduced_clock) {
8801 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8802
8803 if (reduced_clock->m < factor * reduced_clock->n)
8804 fp2 |= FP_CB_TUNE;
8805 } else {
8806 fp2 = fp;
8807 }
8808
8809 dpll = 0;
8810
8811 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8812 dpll |= DPLLB_MODE_LVDS;
8813 else
8814 dpll |= DPLLB_MODE_DAC_SERIAL;
8815
8816 dpll |= (crtc_state->pixel_multiplier - 1)
8817 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8818
8819 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8820 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8821 dpll |= DPLL_SDVO_HIGH_SPEED;
8822
8823 if (intel_crtc_has_dp_encoder(crtc_state))
8824 dpll |= DPLL_SDVO_HIGH_SPEED;
8825
8826 /*
8827 * The high speed IO clock is only really required for
8828 * SDVO/HDMI/DP, but we also enable it for CRT to make it
8829 * possible to share the DPLL between CRT and HDMI. Enabling
8830 * the clock needlessly does no real harm, except use up a
8831 * bit of power potentially.
8832 *
8833 * We'll limit this to IVB with 3 pipes, since it has only two
8834 * DPLLs and so DPLL sharing is the only way to get three pipes
8835 * driving PCH ports at the same time. On SNB we could do this,
8836 * and potentially avoid enabling the second DPLL, but it's not
8837 * clear if it's a win or loss power-wise. No point in doing
8838 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8839 */
8840 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
8841 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
8842 dpll |= DPLL_SDVO_HIGH_SPEED;
8843
8844 /* compute bitmask from p1 value */
8845 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8846 /* also FPA1 */
8847 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8848
8849 switch (crtc_state->dpll.p2) {
8850 case 5:
8851 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8852 break;
8853 case 7:
8854 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8855 break;
8856 case 10:
8857 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8858 break;
8859 case 14:
8860 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8861 break;
8862 }
8863
8864 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8865 intel_panel_use_ssc(dev_priv))
8866 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8867 else
8868 dpll |= PLL_REF_INPUT_DREFCLK;
8869
8870 dpll |= DPLL_VCO_ENABLE;
8871
8872 crtc_state->dpll_hw_state.dpll = dpll;
8873 crtc_state->dpll_hw_state.fp0 = fp;
8874 crtc_state->dpll_hw_state.fp1 = fp2;
8875 }
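/*
 * Encoding note (a sketch of the register layout assumed above): the
 * P1 post divider is programmed as a one-hot bitmask, so e.g. p1 == 2
 * becomes (1 << 1) == 0x2 in both the FPA0 and FPA1 fields, while P2
 * is a discrete field covering only the dividers the hardware
 * supports (5, 7, 10, 14).
 */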
8876
8877 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8878 struct intel_crtc_state *crtc_state)
8879 {
8880 struct drm_device *dev = crtc->base.dev;
8881 struct drm_i915_private *dev_priv = to_i915(dev);
8882 const struct intel_limit *limit;
8883 int refclk = 120000;
8884
8885 memset(&crtc_state->dpll_hw_state, 0,
8886 sizeof(crtc_state->dpll_hw_state));
8887
8888 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8889 if (!crtc_state->has_pch_encoder)
8890 return 0;
8891
8892 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8893 if (intel_panel_use_ssc(dev_priv)) {
8894 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8895 dev_priv->vbt.lvds_ssc_freq);
8896 refclk = dev_priv->vbt.lvds_ssc_freq;
8897 }
8898
8899 if (intel_is_dual_link_lvds(dev)) {
8900 if (refclk == 100000)
8901 limit = &intel_limits_ironlake_dual_lvds_100m;
8902 else
8903 limit = &intel_limits_ironlake_dual_lvds;
8904 } else {
8905 if (refclk == 100000)
8906 limit = &intel_limits_ironlake_single_lvds_100m;
8907 else
8908 limit = &intel_limits_ironlake_single_lvds;
8909 }
8910 } else {
8911 limit = &intel_limits_ironlake_dac;
8912 }
8913
8914 if (!crtc_state->clock_set &&
8915 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8916 refclk, NULL, &crtc_state->dpll)) {
8917 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8918 return -EINVAL;
8919 }
8920
8921 ironlake_compute_dpll(crtc, crtc_state, NULL);
8922
8923 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
8924 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8925 pipe_name(crtc->pipe));
8926 return -EINVAL;
8927 }
8928
8929 return 0;
8930 }
8931
8932 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8933 struct intel_link_m_n *m_n)
8934 {
8935 struct drm_device *dev = crtc->base.dev;
8936 struct drm_i915_private *dev_priv = to_i915(dev);
8937 enum pipe pipe = crtc->pipe;
8938
8939 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8940 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8941 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8942 & ~TU_SIZE_MASK;
8943 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8944 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8945 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8946 }
8947
8948 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
8949 enum transcoder transcoder,
8950 struct intel_link_m_n *m_n,
8951 struct intel_link_m_n *m2_n2)
8952 {
8953 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8954 enum pipe pipe = crtc->pipe;
8955
8956 if (INTEL_GEN(dev_priv) >= 5) {
8957 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
8958 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
8959 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
8960 & ~TU_SIZE_MASK;
8961 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
8962 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
8963 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8964
8965 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
8966 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
8967 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
8968 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
8969 & ~TU_SIZE_MASK;
8970 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
8971 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
8972 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8973 }
8974 } else {
8975 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
8976 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
8977 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
8978 & ~TU_SIZE_MASK;
8979 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
8980 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
8981 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8982 }
8983 }
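/*
 * Readout sketch, assuming the usual TU_SIZE(x) == ((x) - 1) <<
 * TU_SIZE_SHIFT packing: a transfer unit of 64 is programmed as 63 in
 * the top bits of the data M register, so the "+ 1" above recovers
 * tu == 64 while the "& ~TU_SIZE_MASK" strips it out of gmch_m.
 */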
8984
8985 void intel_dp_get_m_n(struct intel_crtc *crtc,
8986 struct intel_crtc_state *pipe_config)
8987 {
8988 if (pipe_config->has_pch_encoder)
8989 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8990 else
8991 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8992 &pipe_config->dp_m_n,
8993 &pipe_config->dp_m2_n2);
8994 }
8995
8996 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
8997 struct intel_crtc_state *pipe_config)
8998 {
8999 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9000 &pipe_config->fdi_m_n, NULL);
9001 }
9002
9003 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9004 struct intel_crtc_state *pipe_config)
9005 {
9006 struct drm_device *dev = crtc->base.dev;
9007 struct drm_i915_private *dev_priv = to_i915(dev);
9008 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9009 u32 ps_ctrl = 0;
9010 int id = -1;
9011 int i;
9012
9013 /* find scaler attached to this pipe */
9014 for (i = 0; i < crtc->num_scalers; i++) {
9015 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9016 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9017 id = i;
9018 pipe_config->pch_pfit.enabled = true;
9019 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9020 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9021 scaler_state->scalers[i].in_use = true;
9022 break;
9023 }
9024 }
9025
9026 scaler_state->scaler_id = id;
9027 if (id >= 0) {
9028 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9029 } else {
9030 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9031 }
9032 }
9033
9034 static void
9035 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9036 struct intel_initial_plane_config *plane_config)
9037 {
9038 struct drm_device *dev = crtc->base.dev;
9039 struct drm_i915_private *dev_priv = to_i915(dev);
9040 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9041 enum plane_id plane_id = plane->id;
9042 enum pipe pipe;
9043 u32 val, base, offset, stride_mult, tiling, alpha;
9044 int fourcc, pixel_format;
9045 unsigned int aligned_height;
9046 struct drm_framebuffer *fb;
9047 struct intel_framebuffer *intel_fb;
9048
9049 if (!plane->get_hw_state(plane, &pipe))
9050 return;
9051
9052 WARN_ON(pipe != crtc->pipe);
9053
9054 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9055 if (!intel_fb) {
9056 DRM_DEBUG_KMS("failed to alloc fb\n");
9057 return;
9058 }
9059
9060 fb = &intel_fb->base;
9061
9062 fb->dev = dev;
9063
9064 val = I915_READ(PLANE_CTL(pipe, plane_id));
9065
9066 if (INTEL_GEN(dev_priv) >= 11)
9067 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9068 else
9069 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9070
9071 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9072 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9073 alpha &= PLANE_COLOR_ALPHA_MASK;
9074 } else {
9075 alpha = val & PLANE_CTL_ALPHA_MASK;
9076 }
9077
9078 fourcc = skl_format_to_fourcc(pixel_format,
9079 val & PLANE_CTL_ORDER_RGBX, alpha);
9080 fb->format = drm_format_info(fourcc);
9081
9082 tiling = val & PLANE_CTL_TILED_MASK;
9083 switch (tiling) {
9084 case PLANE_CTL_TILED_LINEAR:
9085 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9086 break;
9087 case PLANE_CTL_TILED_X:
9088 plane_config->tiling = I915_TILING_X;
9089 fb->modifier = I915_FORMAT_MOD_X_TILED;
9090 break;
9091 case PLANE_CTL_TILED_Y:
9092 plane_config->tiling = I915_TILING_Y;
9093 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9094 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9095 else
9096 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9097 break;
9098 case PLANE_CTL_TILED_YF:
9099 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9100 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9101 else
9102 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9103 break;
9104 default:
9105 MISSING_CASE(tiling);
9106 goto error;
9107 }
9108
9109 /*
9110 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
9111 * while i915 HW rotation is clockwise; that's why we swap them here.
9112 */
9113 switch (val & PLANE_CTL_ROTATE_MASK) {
9114 case PLANE_CTL_ROTATE_0:
9115 plane_config->rotation = DRM_MODE_ROTATE_0;
9116 break;
9117 case PLANE_CTL_ROTATE_90:
9118 plane_config->rotation = DRM_MODE_ROTATE_270;
9119 break;
9120 case PLANE_CTL_ROTATE_180:
9121 plane_config->rotation = DRM_MODE_ROTATE_180;
9122 break;
9123 case PLANE_CTL_ROTATE_270:
9124 plane_config->rotation = DRM_MODE_ROTATE_90;
9125 break;
9126 }
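/*
 * Equivalently (illustrative): with DRM angles counter-clockwise and
 * HW angles clockwise, the readout is drm_angle = (360 - hw_angle) %
 * 360, which swaps 90 <-> 270 and leaves 0 and 180 alone, exactly as
 * the switch above does.
 */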
9127
9128 if (INTEL_GEN(dev_priv) >= 10 &&
9129 val & PLANE_CTL_FLIP_HORIZONTAL)
9130 plane_config->rotation |= DRM_MODE_REFLECT_X;
9131
9132 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9133 plane_config->base = base;
9134
9135 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9136
9137 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9138 fb->height = ((val >> 16) & 0xfff) + 1;
9139 fb->width = ((val >> 0) & 0x1fff) + 1;
9140
9141 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9142 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9143 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9144
9145 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9146
9147 plane_config->size = fb->pitches[0] * aligned_height;
9148
9149 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9150 crtc->base.name, plane->base.name, fb->width, fb->height,
9151 fb->format->cpp[0] * 8, base, fb->pitches[0],
9152 plane_config->size);
9153
9154 plane_config->fb = intel_fb;
9155 return;
9156
9157 error:
9158 kfree(intel_fb);
9159 }
9160
9161 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9162 struct intel_crtc_state *pipe_config)
9163 {
9164 struct drm_device *dev = crtc->base.dev;
9165 struct drm_i915_private *dev_priv = to_i915(dev);
9166 u32 tmp;
9167
9168 tmp = I915_READ(PF_CTL(crtc->pipe));
9169
9170 if (tmp & PF_ENABLE) {
9171 pipe_config->pch_pfit.enabled = true;
9172 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9173 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9174
9175 /* We currently do not free assignments of panel fitters on
9176 * ivb/hsw (since we don't use the higher upscaling modes which
9177 * differentiate them) so just WARN about this case for now. */
9178 if (IS_GEN(dev_priv, 7)) {
9179 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9180 PF_PIPE_SEL_IVB(crtc->pipe));
9181 }
9182 }
9183 }
9184
9185 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9186 struct intel_crtc_state *pipe_config)
9187 {
9188 struct drm_device *dev = crtc->base.dev;
9189 struct drm_i915_private *dev_priv = to_i915(dev);
9190 enum intel_display_power_domain power_domain;
9191 intel_wakeref_t wakeref;
9192 u32 tmp;
9193 bool ret;
9194
9195 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9196 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9197 if (!wakeref)
9198 return false;
9199
9200 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9201 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9202 pipe_config->shared_dpll = NULL;
9203
9204 ret = false;
9205 tmp = I915_READ(PIPECONF(crtc->pipe));
9206 if (!(tmp & PIPECONF_ENABLE))
9207 goto out;
9208
9209 switch (tmp & PIPECONF_BPC_MASK) {
9210 case PIPECONF_6BPC:
9211 pipe_config->pipe_bpp = 18;
9212 break;
9213 case PIPECONF_8BPC:
9214 pipe_config->pipe_bpp = 24;
9215 break;
9216 case PIPECONF_10BPC:
9217 pipe_config->pipe_bpp = 30;
9218 break;
9219 case PIPECONF_12BPC:
9220 pipe_config->pipe_bpp = 36;
9221 break;
9222 default:
9223 break;
9224 }
9225
9226 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9227 pipe_config->limited_color_range = true;
9228
9229 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9230 struct intel_shared_dpll *pll;
9231 enum intel_dpll_id pll_id;
9232
9233 pipe_config->has_pch_encoder = true;
9234
9235 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9236 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9237 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9238
9239 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9240
9241 if (HAS_PCH_IBX(dev_priv)) {
9242 /*
9243 * The pipe->pch transcoder and pch transcoder->pll
9244 * mappings are fixed.
9245 */
9246 pll_id = (enum intel_dpll_id) crtc->pipe;
9247 } else {
9248 tmp = I915_READ(PCH_DPLL_SEL);
9249 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9250 pll_id = DPLL_ID_PCH_PLL_B;
9251 else
9252 pll_id = DPLL_ID_PCH_PLL_A;
9253 }
9254
9255 pipe_config->shared_dpll =
9256 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9257 pll = pipe_config->shared_dpll;
9258
9259 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9260 &pipe_config->dpll_hw_state));
9261
9262 tmp = pipe_config->dpll_hw_state.dpll;
9263 pipe_config->pixel_multiplier =
9264 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9265 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9266
9267 ironlake_pch_clock_get(crtc, pipe_config);
9268 } else {
9269 pipe_config->pixel_multiplier = 1;
9270 }
9271
9272 intel_get_pipe_timings(crtc, pipe_config);
9273 intel_get_pipe_src_size(crtc, pipe_config);
9274
9275 ironlake_get_pfit_config(crtc, pipe_config);
9276
9277 ret = true;
9278
9279 out:
9280 intel_display_power_put(dev_priv, power_domain, wakeref);
9281
9282 return ret;
9283 }
9284
9285 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9286 {
9287 struct drm_device *dev = &dev_priv->drm;
9288 struct intel_crtc *crtc;
9289
9290 for_each_intel_crtc(dev, crtc)
9291 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9292 pipe_name(crtc->pipe));
9293
9294 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
9295 "Display power well on\n");
9296 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9297 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9298 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9299 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
9300 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9301 "CPU PWM1 enabled\n");
9302 if (IS_HASWELL(dev_priv))
9303 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9304 "CPU PWM2 enabled\n");
9305 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9306 "PCH PWM1 enabled\n");
9307 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9308 "Utility pin enabled\n");
9309 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9310
9311 /*
9312 * In theory we can still leave IRQs enabled, as long as only the HPD
9313 * interrupts remain enabled. We used to check for that, but since it's
9314 * gen-specific and since we only disable LCPLL after we fully disable
9315 * the interrupts, the check below should be enough.
9316 */
9317 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9318 }
9319
9320 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
9321 {
9322 if (IS_HASWELL(dev_priv))
9323 return I915_READ(D_COMP_HSW);
9324 else
9325 return I915_READ(D_COMP_BDW);
9326 }
9327
9328 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
9329 {
9330 if (IS_HASWELL(dev_priv)) {
9331 mutex_lock(&dev_priv->pcu_lock);
9332 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9333 val))
9334 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9335 mutex_unlock(&dev_priv->pcu_lock);
9336 } else {
9337 I915_WRITE(D_COMP_BDW, val);
9338 POSTING_READ(D_COMP_BDW);
9339 }
9340 }
9341
9342 /*
9343 * This function implements pieces of two sequences from BSpec:
9344 * - Sequence for display software to disable LCPLL
9345 * - Sequence for display software to allow package C8+
9346 * The steps implemented here are just the steps that actually touch the LCPLL
9347 * register. Callers should take care of disabling all the display engine
9348 * functions, doing the mode unset, fixing interrupts, etc.
9349 */
9350 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9351 bool switch_to_fclk, bool allow_power_down)
9352 {
9353 u32 val;
9354
9355 assert_can_disable_lcpll(dev_priv);
9356
9357 val = I915_READ(LCPLL_CTL);
9358
9359 if (switch_to_fclk) {
9360 val |= LCPLL_CD_SOURCE_FCLK;
9361 I915_WRITE(LCPLL_CTL, val);
9362
9363 if (wait_for_us(I915_READ(LCPLL_CTL) &
9364 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9365 DRM_ERROR("Switching to FCLK failed\n");
9366
9367 val = I915_READ(LCPLL_CTL);
9368 }
9369
9370 val |= LCPLL_PLL_DISABLE;
9371 I915_WRITE(LCPLL_CTL, val);
9372 POSTING_READ(LCPLL_CTL);
9373
9374 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9375 DRM_ERROR("LCPLL still locked\n");
9376
9377 val = hsw_read_dcomp(dev_priv);
9378 val |= D_COMP_COMP_DISABLE;
9379 hsw_write_dcomp(dev_priv, val);
9380 ndelay(100);
9381
9382 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9383 1))
9384 DRM_ERROR("D_COMP RCOMP still in progress\n");
9385
9386 if (allow_power_down) {
9387 val = I915_READ(LCPLL_CTL);
9388 val |= LCPLL_POWER_DOWN_ALLOW;
9389 I915_WRITE(LCPLL_CTL, val);
9390 POSTING_READ(LCPLL_CTL);
9391 }
9392 }
9393
9394 /*
9395 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9396 * source.
9397 */
9398 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9399 {
9400 u32 val;
9401
9402 val = I915_READ(LCPLL_CTL);
9403
9404 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9405 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9406 return;
9407
9408 /*
9409 * Make sure we're not in PC8 state before disabling PC8, otherwise
9410 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9411 */
9412 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9413
9414 if (val & LCPLL_POWER_DOWN_ALLOW) {
9415 val &= ~LCPLL_POWER_DOWN_ALLOW;
9416 I915_WRITE(LCPLL_CTL, val);
9417 POSTING_READ(LCPLL_CTL);
9418 }
9419
9420 val = hsw_read_dcomp(dev_priv);
9421 val |= D_COMP_COMP_FORCE;
9422 val &= ~D_COMP_COMP_DISABLE;
9423 hsw_write_dcomp(dev_priv, val);
9424
9425 val = I915_READ(LCPLL_CTL);
9426 val &= ~LCPLL_PLL_DISABLE;
9427 I915_WRITE(LCPLL_CTL, val);
9428
9429 if (intel_wait_for_register(dev_priv,
9430 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9431 5))
9432 DRM_ERROR("LCPLL not locked yet\n");
9433
9434 if (val & LCPLL_CD_SOURCE_FCLK) {
9435 val = I915_READ(LCPLL_CTL);
9436 val &= ~LCPLL_CD_SOURCE_FCLK;
9437 I915_WRITE(LCPLL_CTL, val);
9438
9439 if (wait_for_us((I915_READ(LCPLL_CTL) &
9440 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9441 DRM_ERROR("Switching back to LCPLL failed\n");
9442 }
9443
9444 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9445
9446 intel_update_cdclk(dev_priv);
9447 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
9448 }
9449
9450 /*
9451 * Package states C8 and deeper are really deep PC states that can only be
9452 * reached when all the devices on the system allow it, so even if the graphics
9453 * device allows PC8+, it doesn't mean the system will actually get to these
9454 * states. Our driver only allows PC8+ when going into runtime PM.
9455 *
9456 * The requirements for PC8+ are that all the outputs are disabled, the power
9457 * well is disabled and most interrupts are disabled, and these are also
9458 * requirements for runtime PM. When these conditions are met, we manually do
9459 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9460 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9461 * hang the machine.
9462 *
9463 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9464 * the state of some registers, so when we come back from PC8+ we need to
9465 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9466 * need to take care of the registers kept by RC6. Notice that this happens even
9467 * if we don't put the device in PCI D3 state (which is what currently happens
9468 * because of the runtime PM support).
9469 *
9470 * For more, read "Display Sequences for Package C8" on the hardware
9471 * documentation.
9472 */
9473 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9474 {
9475 u32 val;
9476
9477 DRM_DEBUG_KMS("Enabling package C8+\n");
9478
9479 if (HAS_PCH_LPT_LP(dev_priv)) {
9480 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9481 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9482 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9483 }
9484
9485 lpt_disable_clkout_dp(dev_priv);
9486 hsw_disable_lcpll(dev_priv, true, true);
9487 }
9488
9489 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9490 {
9491 u32 val;
9492
9493 DRM_DEBUG_KMS("Disabling package C8+\n");
9494
9495 hsw_restore_lcpll(dev_priv);
9496 lpt_init_pch_refclk(dev_priv);
9497
9498 if (HAS_PCH_LPT_LP(dev_priv)) {
9499 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9500 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9501 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9502 }
9503 }
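/*
 * Usage sketch (illustrative; the actual callers are the runtime PM
 * hooks elsewhere in the driver): PC8 entry and exit are strictly
 * paired around the idle period, roughly:
 *
 *	hsw_enable_pc8(dev_priv);	(runtime suspend)
 *	... device idle, package may reach C8+ ...
 *	hsw_disable_pc8(dev_priv);	(runtime resume)
 */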
9504
9505 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9506 struct intel_crtc_state *crtc_state)
9507 {
9508 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9509 struct intel_atomic_state *state =
9510 to_intel_atomic_state(crtc_state->base.state);
9511
9512 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9513 IS_ICELAKE(dev_priv)) {
9514 struct intel_encoder *encoder =
9515 intel_get_crtc_new_encoder(state, crtc_state);
9516
9517 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9518 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9519 pipe_name(crtc->pipe));
9520 return -EINVAL;
9521 }
9522 }
9523
9524 return 0;
9525 }
9526
9527 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9528 enum port port,
9529 struct intel_crtc_state *pipe_config)
9530 {
9531 enum intel_dpll_id id;
9532 u32 temp;
9533
9534 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9535 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9536
9537 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9538 return;
9539
9540 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9541 }
9542
9543 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9544 enum port port,
9545 struct intel_crtc_state *pipe_config)
9546 {
9547 enum intel_dpll_id id;
9548 u32 temp;
9549
9550 /* TODO: TBT pll not implemented. */
9551 if (intel_port_is_combophy(dev_priv, port)) {
9552 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9553 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9554 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9555
9556 if (WARN_ON(!intel_dpll_is_combophy(id)))
9557 return;
9558 } else if (intel_port_is_tc(dev_priv, port)) {
9559 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9560 } else {
9561 WARN(1, "Invalid port %x\n", port);
9562 return;
9563 }
9564
9565 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9566 }
9567
9568 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9569 enum port port,
9570 struct intel_crtc_state *pipe_config)
9571 {
9572 enum intel_dpll_id id;
9573
9574 switch (port) {
9575 case PORT_A:
9576 id = DPLL_ID_SKL_DPLL0;
9577 break;
9578 case PORT_B:
9579 id = DPLL_ID_SKL_DPLL1;
9580 break;
9581 case PORT_C:
9582 id = DPLL_ID_SKL_DPLL2;
9583 break;
9584 default:
9585 DRM_ERROR("Incorrect port type\n");
9586 return;
9587 }
9588
9589 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9590 }
9591
9592 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9593 enum port port,
9594 struct intel_crtc_state *pipe_config)
9595 {
9596 enum intel_dpll_id id;
9597 u32 temp;
9598
9599 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9600 id = temp >> (port * 3 + 1);
9601
9602 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9603 return;
9604
9605 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9606 }
9607
9608 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9609 enum port port,
9610 struct intel_crtc_state *pipe_config)
9611 {
9612 enum intel_dpll_id id;
9613 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9614
9615 switch (ddi_pll_sel) {
9616 case PORT_CLK_SEL_WRPLL1:
9617 id = DPLL_ID_WRPLL1;
9618 break;
9619 case PORT_CLK_SEL_WRPLL2:
9620 id = DPLL_ID_WRPLL2;
9621 break;
9622 case PORT_CLK_SEL_SPLL:
9623 id = DPLL_ID_SPLL;
9624 break;
9625 case PORT_CLK_SEL_LCPLL_810:
9626 id = DPLL_ID_LCPLL_810;
9627 break;
9628 case PORT_CLK_SEL_LCPLL_1350:
9629 id = DPLL_ID_LCPLL_1350;
9630 break;
9631 case PORT_CLK_SEL_LCPLL_2700:
9632 id = DPLL_ID_LCPLL_2700;
9633 break;
9634 default:
9635 MISSING_CASE(ddi_pll_sel);
9636 /* fall through */
9637 case PORT_CLK_SEL_NONE:
9638 return;
9639 }
9640
9641 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9642 }
9643
9644 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9645 struct intel_crtc_state *pipe_config,
9646 u64 *power_domain_mask)
9647 {
9648 struct drm_device *dev = crtc->base.dev;
9649 struct drm_i915_private *dev_priv = to_i915(dev);
9650 enum intel_display_power_domain power_domain;
9651 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
9652 unsigned long enabled_panel_transcoders = 0;
9653 enum transcoder panel_transcoder;
9654 u32 tmp;
9655
9656 if (IS_ICELAKE(dev_priv))
9657 panel_transcoder_mask |=
9658 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
9659
9660 /*
9661 * The pipe->transcoder mapping is fixed with the exception of the eDP
9662 * and DSI transcoders handled below.
9663 */
9664 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9665
9666 /*
9667 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9668 * consistency and less surprising code; it's in an always-on power well).
9669 */
9670 for_each_set_bit(panel_transcoder,
9671 &panel_transcoder_mask,
9672 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
9673 enum pipe trans_pipe;
9674
9675 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9676 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9677 continue;
9678
9679 /*
9680 * Log all enabled ones, but only use the first one.
9681 *
9682 * FIXME: This won't work for two separate DSI displays.
9683 */
9684 enabled_panel_transcoders |= BIT(panel_transcoder);
9685 if (enabled_panel_transcoders != BIT(panel_transcoder))
9686 continue;
9687
9688 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9689 default:
9690 WARN(1, "unknown pipe linked to transcoder %s\n",
9691 transcoder_name(panel_transcoder));
9692 /* fall through */
9693 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9694 case TRANS_DDI_EDP_INPUT_A_ON:
9695 trans_pipe = PIPE_A;
9696 break;
9697 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9698 trans_pipe = PIPE_B;
9699 break;
9700 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9701 trans_pipe = PIPE_C;
9702 break;
9703 }
9704
9705 if (trans_pipe == crtc->pipe)
9706 pipe_config->cpu_transcoder = panel_transcoder;
9707 }
9708
9709 /*
9710 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
9711 */
9712 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
9713 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
9714
9715 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9716 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9717 return false;
9718
9719 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
9720 *power_domain_mask |= BIT_ULL(power_domain);
9721
9722 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9723
9724 return tmp & PIPECONF_ENABLE;
9725 }
9726
9727 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9728 struct intel_crtc_state *pipe_config,
9729 u64 *power_domain_mask)
9730 {
9731 struct drm_device *dev = crtc->base.dev;
9732 struct drm_i915_private *dev_priv = to_i915(dev);
9733 enum intel_display_power_domain power_domain;
9734 enum port port;
9735 enum transcoder cpu_transcoder;
9736 u32 tmp;
9737
9738 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9739 if (port == PORT_A)
9740 cpu_transcoder = TRANSCODER_DSI_A;
9741 else
9742 cpu_transcoder = TRANSCODER_DSI_C;
9743
9744 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9745 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9746 continue;
9747
9748 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
9749 *power_domain_mask |= BIT_ULL(power_domain);
9750
9751 /*
9752 * The PLL needs to be enabled with a valid divider
9753 * configuration, otherwise accessing DSI registers will hang
9754 * the machine. See BSpec North Display Engine
9755 * registers/MIPI[BXT]. We can break out here early, since we
9756 * need the same DSI PLL to be enabled for both DSI ports.
9757 */
9758 if (!bxt_dsi_pll_is_enabled(dev_priv))
9759 break;
9760
9761 /* XXX: this works for video mode only */
9762 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9763 if (!(tmp & DPI_ENABLE))
9764 continue;
9765
9766 tmp = I915_READ(MIPI_CTRL(port));
9767 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9768 continue;
9769
9770 pipe_config->cpu_transcoder = cpu_transcoder;
9771 break;
9772 }
9773
9774 return transcoder_is_dsi(pipe_config->cpu_transcoder);
9775 }
9776
9777 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9778 struct intel_crtc_state *pipe_config)
9779 {
9780 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9781 struct intel_shared_dpll *pll;
9782 enum port port;
9783 u32 tmp;
9784
9785 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9786
9787 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9788
9789 if (IS_ICELAKE(dev_priv))
9790 icelake_get_ddi_pll(dev_priv, port, pipe_config);
9791 else if (IS_CANNONLAKE(dev_priv))
9792 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
9793 else if (IS_GEN9_BC(dev_priv))
9794 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9795 else if (IS_GEN9_LP(dev_priv))
9796 bxt_get_ddi_pll(dev_priv, port, pipe_config);
9797 else
9798 haswell_get_ddi_pll(dev_priv, port, pipe_config);
9799
9800 pll = pipe_config->shared_dpll;
9801 if (pll) {
9802 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9803 &pipe_config->dpll_hw_state));
9804 }
9805
9806 /*
9807 * Haswell has only FDI/PCH transcoder A, which is connected to
9808 * DDI E. So just check whether this pipe is wired to DDI E and whether
9809 * the PCH transcoder is on.
9810 */
9811 if (INTEL_GEN(dev_priv) < 9 &&
9812 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9813 pipe_config->has_pch_encoder = true;
9814
9815 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9816 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9817 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9818
9819 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9820 }
9821 }
9822
9823 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9824 struct intel_crtc_state *pipe_config)
9825 {
9826 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9827 enum intel_display_power_domain power_domain;
9828 u64 power_domain_mask;
9829 bool active;
9830
9831 intel_crtc_init_scalers(crtc, pipe_config);
9832
9833 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9834 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9835 return false;
9836 power_domain_mask = BIT_ULL(power_domain);
9837
9838 pipe_config->shared_dpll = NULL;
9839
9840 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
9841
9842 if (IS_GEN9_LP(dev_priv) &&
9843 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
9844 WARN_ON(active);
9845 active = true;
9846 }
9847
9848 if (!active)
9849 goto out;
9850
9851 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
9852 IS_ICELAKE(dev_priv)) {
9853 haswell_get_ddi_port_state(crtc, pipe_config);
9854 intel_get_pipe_timings(crtc, pipe_config);
9855 }
9856
9857 intel_get_pipe_src_size(crtc, pipe_config);
9858 intel_get_crtc_ycbcr_config(crtc, pipe_config);
9859
9860 pipe_config->gamma_mode =
9861 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9862
9863 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9864 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9865 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
9866 power_domain_mask |= BIT_ULL(power_domain);
9867
9868 if (INTEL_GEN(dev_priv) >= 9)
9869 skylake_get_pfit_config(crtc, pipe_config);
9870 else
9871 ironlake_get_pfit_config(crtc, pipe_config);
9872 }
9873
9874 if (hsw_crtc_supports_ips(crtc)) {
9875 if (IS_HASWELL(dev_priv))
9876 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
9877 else {
9878 /*
9879 * We cannot read out the IPS state on Broadwell; set it to
9880 * true so we can set it to a defined state on the first
9881 * commit.
9882 */
9883 pipe_config->ips_enabled = true;
9884 }
9885 }
9886
9887 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
9888 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
9889 pipe_config->pixel_multiplier =
9890 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
9891 } else {
9892 pipe_config->pixel_multiplier = 1;
9893 }
9894
9895 out:
9896 for_each_power_domain(power_domain, power_domain_mask)
9897 intel_display_power_put_unchecked(dev_priv, power_domain);
9898
9899 return active;
9900 }
9901
9902 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9903 {
9904 struct drm_i915_private *dev_priv =
9905 to_i915(plane_state->base.plane->dev);
9906 const struct drm_framebuffer *fb = plane_state->base.fb;
9907 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9908 u32 base;
9909
9910 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
9911 base = obj->phys_handle->busaddr;
9912 else
9913 base = intel_plane_ggtt_offset(plane_state);
9914
9915 base += plane_state->color_plane[0].offset;
9916
9917 /* ILK+ do this automagically */
9918 if (HAS_GMCH(dev_priv) &&
9919 plane_state->base.rotation & DRM_MODE_ROTATE_180)
9920 base += (plane_state->base.crtc_h *
9921 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9922
9923 return base;
9924 }
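/*
 * Worked example (illustrative): for a 64x64 ARGB cursor rotated 180
 * degrees on a GMCH platform, the adjustment above adds
 * (64 * 64 - 1) * 4 == 16380 bytes, i.e. base points at the last
 * pixel and the hardware effectively scans the image out backwards.
 */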
9925
9926 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9927 {
9928 int x = plane_state->base.crtc_x;
9929 int y = plane_state->base.crtc_y;
9930 u32 pos = 0;
9931
9932 if (x < 0) {
9933 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9934 x = -x;
9935 }
9936 pos |= x << CURSOR_X_SHIFT;
9937
9938 if (y < 0) {
9939 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9940 y = -y;
9941 }
9942 pos |= y << CURSOR_Y_SHIFT;
9943
9944 return pos;
9945 }
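/*
 * Encoding example (illustrative): the position register is
 * sign-magnitude per axis, so (x, y) == (-5, 3) is encoded as
 * ((CURSOR_POS_SIGN | 5) << CURSOR_X_SHIFT) | (3 << CURSOR_Y_SHIFT).
 */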
9946
9947 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9948 {
9949 const struct drm_mode_config *config =
9950 &plane_state->base.plane->dev->mode_config;
9951 int width = plane_state->base.crtc_w;
9952 int height = plane_state->base.crtc_h;
9953
9954 return width > 0 && width <= config->cursor_width &&
9955 height > 0 && height <= config->cursor_height;
9956 }
9957
9958 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
9959 {
9960 const struct drm_framebuffer *fb = plane_state->base.fb;
9961 unsigned int rotation = plane_state->base.rotation;
9962 int src_x, src_y;
9963 u32 offset;
9964 int ret;
9965
9966 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
9967 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
9968
9969 ret = intel_plane_check_stride(plane_state);
9970 if (ret)
9971 return ret;
9972
9973 src_x = plane_state->base.src_x >> 16;
9974 src_y = plane_state->base.src_y >> 16;
9975
9976 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
9977 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
9978 plane_state, 0);
9979
9980 if (src_x != 0 || src_y != 0) {
9981 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
9982 return -EINVAL;
9983 }
9984
9985 plane_state->color_plane[0].offset = offset;
9986
9987 return 0;
9988 }
9989
9990 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9991 struct intel_plane_state *plane_state)
9992 {
9993 const struct drm_framebuffer *fb = plane_state->base.fb;
9994 int ret;
9995
9996 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
9997 DRM_DEBUG_KMS("cursor cannot be tiled\n");
9998 return -EINVAL;
9999 }
10000
10001 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10002 &crtc_state->base,
10003 DRM_PLANE_HELPER_NO_SCALING,
10004 DRM_PLANE_HELPER_NO_SCALING,
10005 true, true);
10006 if (ret)
10007 return ret;
10008
10009 if (!plane_state->base.visible)
10010 return 0;
10011
10012 ret = intel_plane_check_src_coordinates(plane_state);
10013 if (ret)
10014 return ret;
10015
10016 ret = intel_cursor_check_surface(plane_state);
10017 if (ret)
10018 return ret;
10019
10020 return 0;
10021 }
10022
10023 static unsigned int
10024 i845_cursor_max_stride(struct intel_plane *plane,
10025 u32 pixel_format, u64 modifier,
10026 unsigned int rotation)
10027 {
10028 return 2048;
10029 }
10030
10031 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10032 {
10033 return CURSOR_GAMMA_ENABLE;
10034 }
10035
10036 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10037 const struct intel_plane_state *plane_state)
10038 {
10039 return CURSOR_ENABLE |
10040 CURSOR_FORMAT_ARGB |
10041 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10042 }
10043
10044 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10045 {
10046 int width = plane_state->base.crtc_w;
10047
10048 /*
10049 * 845g/865g are only limited by the width of their cursors;
10050 * the height is arbitrary up to the precision of the register.
10051 */
10052 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10053 }
10054
10055 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10056 struct intel_plane_state *plane_state)
10057 {
10058 const struct drm_framebuffer *fb = plane_state->base.fb;
10059 int ret;
10060
10061 ret = intel_check_cursor(crtc_state, plane_state);
10062 if (ret)
10063 return ret;
10064
10065 /* if we want to turn off the cursor ignore width and height */
10066 if (!fb)
10067 return 0;
10068
10069 /* Check for which cursor types we support */
10070 if (!i845_cursor_size_ok(plane_state)) {
10071 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10072 plane_state->base.crtc_w,
10073 plane_state->base.crtc_h);
10074 return -EINVAL;
10075 }
10076
10077 WARN_ON(plane_state->base.visible &&
10078 plane_state->color_plane[0].stride != fb->pitches[0]);
10079
10080 switch (fb->pitches[0]) {
10081 case 256:
10082 case 512:
10083 case 1024:
10084 case 2048:
10085 break;
10086 default:
10087 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10088 fb->pitches[0]);
10089 return -EINVAL;
10090 }
10091
10092 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10093
10094 return 0;
10095 }
10096
10097 static void i845_update_cursor(struct intel_plane *plane,
10098 const struct intel_crtc_state *crtc_state,
10099 const struct intel_plane_state *plane_state)
10100 {
10101 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10102 u32 cntl = 0, base = 0, pos = 0, size = 0;
10103 unsigned long irqflags;
10104
10105 if (plane_state && plane_state->base.visible) {
10106 unsigned int width = plane_state->base.crtc_w;
10107 unsigned int height = plane_state->base.crtc_h;
10108
10109 cntl = plane_state->ctl |
10110 i845_cursor_ctl_crtc(crtc_state);
10111
10112 size = (height << 12) | width;
10113
10114 base = intel_cursor_base(plane_state);
10115 pos = intel_cursor_position(plane_state);
10116 }
10117
10118 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10119
10120 /* On these chipsets we can only modify the base/size/stride
10121 * whilst the cursor is disabled.
10122 */
10123 if (plane->cursor.base != base ||
10124 plane->cursor.size != size ||
10125 plane->cursor.cntl != cntl) {
10126 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10127 I915_WRITE_FW(CURBASE(PIPE_A), base);
10128 I915_WRITE_FW(CURSIZE, size);
10129 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10130 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10131
10132 plane->cursor.base = base;
10133 plane->cursor.size = size;
10134 plane->cursor.cntl = cntl;
10135 } else {
10136 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10137 }
10138
10139 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10140 }
10141
10142 static void i845_disable_cursor(struct intel_plane *plane,
10143 const struct intel_crtc_state *crtc_state)
10144 {
10145 i845_update_cursor(plane, crtc_state, NULL);
10146 }
10147
10148 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10149 enum pipe *pipe)
10150 {
10151 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10152 enum intel_display_power_domain power_domain;
10153 intel_wakeref_t wakeref;
10154 bool ret;
10155
10156 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10157 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10158 if (!wakeref)
10159 return false;
10160
10161 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10162
10163 *pipe = PIPE_A;
10164
10165 intel_display_power_put(dev_priv, power_domain, wakeref);
10166
10167 return ret;
10168 }
10169
10170 static unsigned int
10171 i9xx_cursor_max_stride(struct intel_plane *plane,
10172 u32 pixel_format, u64 modifier,
10173 unsigned int rotation)
10174 {
10175 return plane->base.dev->mode_config.cursor_width * 4;
10176 }
10177
10178 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10179 {
10180 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10181 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10182 u32 cntl = 0;
10183
10184 if (INTEL_GEN(dev_priv) >= 11)
10185 return cntl;
10186
10187 cntl |= MCURSOR_GAMMA_ENABLE;
10188
10189 if (HAS_DDI(dev_priv))
10190 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10191
10192 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10193 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10194
10195 return cntl;
10196 }
10197
10198 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10199 const struct intel_plane_state *plane_state)
10200 {
10201 struct drm_i915_private *dev_priv =
10202 to_i915(plane_state->base.plane->dev);
10203 u32 cntl = 0;
10204
10205 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10206 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10207
10208 switch (plane_state->base.crtc_w) {
10209 case 64:
10210 cntl |= MCURSOR_MODE_64_ARGB_AX;
10211 break;
10212 case 128:
10213 cntl |= MCURSOR_MODE_128_ARGB_AX;
10214 break;
10215 case 256:
10216 cntl |= MCURSOR_MODE_256_ARGB_AX;
10217 break;
10218 default:
10219 MISSING_CASE(plane_state->base.crtc_w);
10220 return 0;
10221 }
10222
10223 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10224 cntl |= MCURSOR_ROTATE_180;
10225
10226 return cntl;
10227 }
10228
10229 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10230 {
10231 struct drm_i915_private *dev_priv =
10232 to_i915(plane_state->base.plane->dev);
10233 int width = plane_state->base.crtc_w;
10234 int height = plane_state->base.crtc_h;
10235
10236 if (!intel_cursor_size_ok(plane_state))
10237 return false;
10238
10239 /* Cursor width is limited to a few power-of-two sizes */
10240 switch (width) {
10241 case 256:
10242 case 128:
10243 case 64:
10244 break;
10245 default:
10246 return false;
10247 }
10248
10249 /*
10250 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10251 * height from 8 lines up to the cursor width, when the
10252 * cursor is not rotated. Everything else requires square
10253 * cursors.
10254 */
10255 if (HAS_CUR_FBC(dev_priv) &&
10256 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10257 if (height < 8 || height > width)
10258 return false;
10259 } else {
10260 if (height != width)
10261 return false;
10262 }
10263
10264 return true;
10265 }
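/*
 * Examples (illustrative): on IVB+ with CUR_FBC and no rotation a
 * 128x32 cursor passes (8 <= height <= width), while 128x4 and 64x128
 * do not; without CUR_FBC, or with rotation, only the square 64x64,
 * 128x128 and 256x256 sizes are accepted.
 */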
10266
10267 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10268 struct intel_plane_state *plane_state)
10269 {
10270 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10271 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10272 const struct drm_framebuffer *fb = plane_state->base.fb;
10273 enum pipe pipe = plane->pipe;
10274 int ret;
10275
10276 ret = intel_check_cursor(crtc_state, plane_state);
10277 if (ret)
10278 return ret;
10279
10280 /* if we want to turn off the cursor ignore width and height */
10281 if (!fb)
10282 return 0;
10283
10284 /* Check for which cursor types we support */
10285 if (!i9xx_cursor_size_ok(plane_state)) {
10286 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10287 plane_state->base.crtc_w,
10288 plane_state->base.crtc_h);
10289 return -EINVAL;
10290 }
10291
10292 WARN_ON(plane_state->base.visible &&
10293 plane_state->color_plane[0].stride != fb->pitches[0]);
10294
10295 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10296 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10297 fb->pitches[0], plane_state->base.crtc_w);
10298 return -EINVAL;
10299 }
10300
10301 /*
10302 * There's something wrong with the cursor on CHV pipe C.
10303 * If it straddles the left edge of the screen then
10304 * moving it away from the edge or disabling it often
10305 * results in a pipe underrun, and often that can lead to a
10306 * dead pipe (constant underrun reported, and it scans
10307 * out just a solid color). To recover from that, the
10308 * display power well must be turned off and on again.
10309 * Refuse to put the cursor into that compromised position.
10310 */
10311 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10312 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10313 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10314 return -EINVAL;
10315 }
10316
10317 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10318
10319 return 0;
10320 }
10321
10322 static void i9xx_update_cursor(struct intel_plane *plane,
10323 const struct intel_crtc_state *crtc_state,
10324 const struct intel_plane_state *plane_state)
10325 {
10326 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10327 enum pipe pipe = plane->pipe;
10328 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10329 unsigned long irqflags;
10330
10331 if (plane_state && plane_state->base.visible) {
10332 cntl = plane_state->ctl |
10333 i9xx_cursor_ctl_crtc(crtc_state);
10334
10335 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10336 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10337
10338 base = intel_cursor_base(plane_state);
10339 pos = intel_cursor_position(plane_state);
10340 }
10341
10342 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10343
10344 /*
10345 * On some platforms writing CURCNTR first will also
10346 * cause CURPOS to be armed by the CURBASE write.
10347 * Without the CURCNTR write the CURPOS write would
10348 * arm itself. Thus we always update CURCNTR before
10349 * CURPOS.
10350 *
10351 * On other platforms CURPOS always requires the
10352 * CURBASE write to arm the update. Additionally,
10353 * a write to any of the cursor registers will cancel
10354 * an already armed cursor update. Thus leaving out
10355 * the CURBASE write after CURPOS could lead to a
10356 * cursor that doesn't appear to move, or even change
10357 * shape. Thus we always write CURBASE.
10358 *
10359 * The other registers are armed by the CURBASE write
10360 * except when the plane is getting enabled at which time
10361 * the CURCNTR write arms the update.
10362 */
10363
10364 if (INTEL_GEN(dev_priv) >= 9)
10365 skl_write_cursor_wm(plane, crtc_state);
10366
10367 if (plane->cursor.base != base ||
10368 plane->cursor.size != fbc_ctl ||
10369 plane->cursor.cntl != cntl) {
10370 if (HAS_CUR_FBC(dev_priv))
10371 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10372 I915_WRITE_FW(CURCNTR(pipe), cntl);
10373 I915_WRITE_FW(CURPOS(pipe), pos);
10374 I915_WRITE_FW(CURBASE(pipe), base);
10375
10376 plane->cursor.base = base;
10377 plane->cursor.size = fbc_ctl;
10378 plane->cursor.cntl = cntl;
10379 } else {
10380 I915_WRITE_FW(CURPOS(pipe), pos);
10381 I915_WRITE_FW(CURBASE(pipe), base);
10382 }
10383
10384 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10385 }
10386
10387 static void i9xx_disable_cursor(struct intel_plane *plane,
10388 const struct intel_crtc_state *crtc_state)
10389 {
10390 i9xx_update_cursor(plane, crtc_state, NULL);
10391 }
10392
10393 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10394 enum pipe *pipe)
10395 {
10396 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10397 enum intel_display_power_domain power_domain;
10398 intel_wakeref_t wakeref;
10399 bool ret;
10400 u32 val;
10401
10402 /*
10403 * Not 100% correct for planes that can move between pipes,
10404 * but that's only the case for gen2-3, which don't have any
10405 * display power wells.
10406 */
10407 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10408 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10409 if (!wakeref)
10410 return false;
10411
10412 val = I915_READ(CURCNTR(plane->pipe));
10413
10414 ret = val & MCURSOR_MODE;
10415
10416 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10417 *pipe = plane->pipe;
10418 else
10419 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10420 MCURSOR_PIPE_SELECT_SHIFT;
10421
10422 intel_display_power_put(dev_priv, power_domain, wakeref);
10423
10424 return ret;
10425 }
10426
10427 /* VESA 640x480x72Hz mode to set on the pipe */
10428 static const struct drm_display_mode load_detect_mode = {
10429 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10430 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10431 };
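/*
 * Sanity check on the timings above (illustrative arithmetic): the
 * 31500 kHz dot clock divided by 832 * 520 total pixels per frame
 * gives roughly 72.8 Hz, matching the advertised 72Hz mode.
 */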
10432
10433 struct drm_framebuffer *
10434 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10435 struct drm_mode_fb_cmd2 *mode_cmd)
10436 {
10437 struct intel_framebuffer *intel_fb;
10438 int ret;
10439
10440 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10441 if (!intel_fb)
10442 return ERR_PTR(-ENOMEM);
10443
10444 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10445 if (ret)
10446 goto err;
10447
10448 return &intel_fb->base;
10449
10450 err:
10451 kfree(intel_fb);
10452 return ERR_PTR(ret);
10453 }
10454
10455 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10456 struct drm_crtc *crtc)
10457 {
10458 struct drm_plane *plane;
10459 struct drm_plane_state *plane_state;
10460 int ret, i;
10461
10462 ret = drm_atomic_add_affected_planes(state, crtc);
10463 if (ret)
10464 return ret;
10465
10466 for_each_new_plane_in_state(state, plane, plane_state, i) {
10467 if (plane_state->crtc != crtc)
10468 continue;
10469
10470 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10471 if (ret)
10472 return ret;
10473
10474 drm_atomic_set_fb_for_plane(plane_state, NULL);
10475 }
10476
10477 return 0;
10478 }
10479
10480 int intel_get_load_detect_pipe(struct drm_connector *connector,
10481 const struct drm_display_mode *mode,
10482 struct intel_load_detect_pipe *old,
10483 struct drm_modeset_acquire_ctx *ctx)
10484 {
10485 struct intel_crtc *intel_crtc;
10486 struct intel_encoder *intel_encoder =
10487 intel_attached_encoder(connector);
10488 struct drm_crtc *possible_crtc;
10489 struct drm_encoder *encoder = &intel_encoder->base;
10490 struct drm_crtc *crtc = NULL;
10491 struct drm_device *dev = encoder->dev;
10492 struct drm_i915_private *dev_priv = to_i915(dev);
10493 struct drm_mode_config *config = &dev->mode_config;
10494 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10495 struct drm_connector_state *connector_state;
10496 struct intel_crtc_state *crtc_state;
10497 int ret, i = -1;
10498
10499 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10500 connector->base.id, connector->name,
10501 encoder->base.id, encoder->name);
10502
10503 old->restore_state = NULL;
10504
10505 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10506
10507 /*
10508 * Algorithm gets a little messy:
10509 *
10510 * - if the connector already has an assigned crtc, use it (but make
10511 * sure it's on first)
10512 *
10513 * - try to find the first unused crtc that can drive this connector,
10514 * and use that if we find one
10515 */
10516
10517 /* See if we already have a CRTC for this connector */
10518 if (connector->state->crtc) {
10519 crtc = connector->state->crtc;
10520
10521 ret = drm_modeset_lock(&crtc->mutex, ctx);
10522 if (ret)
10523 goto fail;
10524
10525 /* Make sure the crtc and connector are running */
10526 goto found;
10527 }
10528
10529 /* Find an unused one (if possible) */
10530 for_each_crtc(dev, possible_crtc) {
10531 i++;
10532 if (!(encoder->possible_crtcs & (1 << i)))
10533 continue;
10534
10535 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10536 if (ret)
10537 goto fail;
10538
10539 if (possible_crtc->state->enable) {
10540 drm_modeset_unlock(&possible_crtc->mutex);
10541 continue;
10542 }
10543
10544 crtc = possible_crtc;
10545 break;
10546 }
10547
10548 /*
10549 * If we didn't find an unused CRTC, don't use any.
10550 */
10551 if (!crtc) {
10552 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10553 ret = -ENODEV;
10554 goto fail;
10555 }
10556
10557 found:
10558 intel_crtc = to_intel_crtc(crtc);
10559
10560 state = drm_atomic_state_alloc(dev);
10561 restore_state = drm_atomic_state_alloc(dev);
10562 if (!state || !restore_state) {
10563 ret = -ENOMEM;
10564 goto fail;
10565 }
10566
10567 state->acquire_ctx = ctx;
10568 restore_state->acquire_ctx = ctx;
10569
10570 connector_state = drm_atomic_get_connector_state(state, connector);
10571 if (IS_ERR(connector_state)) {
10572 ret = PTR_ERR(connector_state);
10573 goto fail;
10574 }
10575
10576 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10577 if (ret)
10578 goto fail;
10579
10580 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10581 if (IS_ERR(crtc_state)) {
10582 ret = PTR_ERR(crtc_state);
10583 goto fail;
10584 }
10585
10586 crtc_state->base.active = crtc_state->base.enable = true;
10587
10588 if (!mode)
10589 mode = &load_detect_mode;
10590
10591 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10592 if (ret)
10593 goto fail;
10594
10595 ret = intel_modeset_disable_planes(state, crtc);
10596 if (ret)
10597 goto fail;
10598
10599 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10600 if (!ret)
10601 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10602 if (!ret)
10603 ret = drm_atomic_add_affected_planes(restore_state, crtc);
10604 if (ret) {
10605 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10606 goto fail;
10607 }
10608
10609 ret = drm_atomic_commit(state);
10610 if (ret) {
10611 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10612 goto fail;
10613 }
10614
10615 old->restore_state = restore_state;
10616 drm_atomic_state_put(state);
10617
10618 /* let the connector get through one full cycle before testing */
10619 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
10620 return true;
10621
10622 fail:
10623 if (state) {
10624 drm_atomic_state_put(state);
10625 state = NULL;
10626 }
10627 if (restore_state) {
10628 drm_atomic_state_put(restore_state);
10629 restore_state = NULL;
10630 }
10631
10632 if (ret == -EDEADLK)
10633 return ret;
10634
10635 return false;
10636 }
10637
10638 void intel_release_load_detect_pipe(struct drm_connector *connector,
10639 struct intel_load_detect_pipe *old,
10640 struct drm_modeset_acquire_ctx *ctx)
10641 {
10642 struct intel_encoder *intel_encoder =
10643 intel_attached_encoder(connector);
10644 struct drm_encoder *encoder = &intel_encoder->base;
10645 struct drm_atomic_state *state = old->restore_state;
10646 int ret;
10647
10648 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10649 connector->base.id, connector->name,
10650 encoder->base.id, encoder->name);
10651
10652 if (!state)
10653 return;
10654
10655 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10656 if (ret)
10657 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10658 drm_atomic_state_put(state);
10659 }
10660
10661 static int i9xx_pll_refclk(struct drm_device *dev,
10662 const struct intel_crtc_state *pipe_config)
10663 {
10664 struct drm_i915_private *dev_priv = to_i915(dev);
10665 u32 dpll = pipe_config->dpll_hw_state.dpll;
10666
10667 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10668 return dev_priv->vbt.lvds_ssc_freq;
10669 else if (HAS_PCH_SPLIT(dev_priv))
10670 return 120000;
10671 else if (!IS_GEN(dev_priv, 2))
10672 return 96000;
10673 else
10674 return 48000;
10675 }
10676
10677 /* Returns the clock of the currently programmed mode of the given pipe. */
10678 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10679 struct intel_crtc_state *pipe_config)
10680 {
10681 struct drm_device *dev = crtc->base.dev;
10682 struct drm_i915_private *dev_priv = to_i915(dev);
10683 int pipe = pipe_config->cpu_transcoder;
10684 u32 dpll = pipe_config->dpll_hw_state.dpll;
10685 u32 fp;
10686 struct dpll clock;
10687 int port_clock;
10688 int refclk = i9xx_pll_refclk(dev, pipe_config);
10689
10690 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10691 fp = pipe_config->dpll_hw_state.fp0;
10692 else
10693 fp = pipe_config->dpll_hw_state.fp1;
10694
10695 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10696 if (IS_PINEVIEW(dev_priv)) {
10697 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10698 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10699 } else {
10700 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10701 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10702 }
10703
10704 if (!IS_GEN(dev_priv, 2)) {
10705 if (IS_PINEVIEW(dev_priv))
10706 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10707 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10708 else
10709 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10710 DPLL_FPA01_P1_POST_DIV_SHIFT);
10711
10712 switch (dpll & DPLL_MODE_MASK) {
10713 case DPLLB_MODE_DAC_SERIAL:
10714 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10715 5 : 10;
10716 break;
10717 case DPLLB_MODE_LVDS:
10718 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10719 7 : 14;
10720 break;
10721 default:
10722 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10723 "mode\n", (int)(dpll & DPLL_MODE_MASK));
10724 return;
10725 }
10726
10727 if (IS_PINEVIEW(dev_priv))
10728 port_clock = pnv_calc_dpll_params(refclk, &clock);
10729 else
10730 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10731 } else {
10732 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
10733 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10734
10735 if (is_lvds) {
10736 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10737 DPLL_FPA01_P1_POST_DIV_SHIFT);
10738
10739 if (lvds & LVDS_CLKB_POWER_UP)
10740 clock.p2 = 7;
10741 else
10742 clock.p2 = 14;
10743 } else {
10744 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10745 clock.p1 = 2;
10746 else {
10747 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10748 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10749 }
10750 if (dpll & PLL_P2_DIVIDE_BY_4)
10751 clock.p2 = 4;
10752 else
10753 clock.p2 = 2;
10754 }
10755
10756 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10757 }
10758
10759 /*
10760 * This value includes pixel_multiplier. We will use
10761 * port_clock to compute adjusted_mode.crtc_clock in the
10762 * encoder's get_config() function.
10763 */
10764 pipe_config->port_clock = port_clock;
10765 }
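/*
 * Worked example with illustrative (not real-hardware) register values,
 * assuming i9xx_calc_dpll_params() computes
 *   m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2),
 *   dot = vco / (p1 * p2):
 *
 *   refclk = 96000 kHz, n = 4, m1 = 12, m2 = 7, p1 = 2, p2 = 5
 *   m   = 5 * 14 + 9     = 79
 *   vco = 96000 * 79 / 6 = 1264000 kHz
 *   dot = 1264000 / 10   = 126400 kHz (~126.4 MHz port clock)
 */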
10766
10767 int intel_dotclock_calculate(int link_freq,
10768 const struct intel_link_m_n *m_n)
10769 {
10770 /*
10771 * The calculation for the data clock is:
10772 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10773 * But we want to avoid losing precision if possible, so:
10774 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10775 *
10776 * and the link M/N ratio gives the dotclock directly:
10777 * pixel_clock = (link_m * link_clock) / link_n
10778 */
10779
10780 if (!m_n->link_n)
10781 return 0;
10782
10783 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10784 }
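/*
 * Worked example (illustrative M/N values): a 148500 kHz dotclock
 * carried over a 270000 kHz (HBR) link could be described by
 * link_m = 11, link_n = 20, since 11 * 270000 / 20 = 148500.
 */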
10785
10786 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10787 struct intel_crtc_state *pipe_config)
10788 {
10789 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10790
10791 /* read out port_clock from the DPLL */
10792 i9xx_crtc_clock_get(crtc, pipe_config);
10793
10794 /*
10795 * In case there is an active pipe without active ports,
10796 * we may need some idea for the dotclock anyway.
10797 * Calculate one based on the FDI configuration.
10798 */
10799 pipe_config->base.adjusted_mode.crtc_clock =
10800 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10801 &pipe_config->fdi_m_n);
10802 }
10803
10804 /* Returns the currently programmed mode of the given encoder. */
10805 struct drm_display_mode *
10806 intel_encoder_current_mode(struct intel_encoder *encoder)
10807 {
10808 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10809 struct intel_crtc_state *crtc_state;
10810 struct drm_display_mode *mode;
10811 struct intel_crtc *crtc;
10812 enum pipe pipe;
10813
10814 if (!encoder->get_hw_state(encoder, &pipe))
10815 return NULL;
10816
10817 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10818
10819 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10820 if (!mode)
10821 return NULL;
10822
10823 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10824 if (!crtc_state) {
10825 kfree(mode);
10826 return NULL;
10827 }
10828
10829 crtc_state->base.crtc = &crtc->base;
10830
10831 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10832 kfree(crtc_state);
10833 kfree(mode);
10834 return NULL;
10835 }
10836
10837 encoder->get_config(encoder, crtc_state);
10838
10839 intel_mode_from_pipe_config(mode, crtc_state);
10840
10841 kfree(crtc_state);
10842
10843 return mode;
10844 }
10845
10846 static void intel_crtc_destroy(struct drm_crtc *crtc)
10847 {
10848 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10849
10850 drm_crtc_cleanup(crtc);
10851 kfree(intel_crtc);
10852 }
10853
10854 /**
10855 * intel_wm_need_update - Check whether watermarks need updating
10856 * @cur: current plane state
10857 * @new: new plane state
10858 *
10859 * Check current plane state versus the new one to determine whether
10860 * watermarks need to be recalculated.
10861 *
10862 * Returns: true if the watermarks need to be recalculated, false otherwise.
10863 */
10864 static bool intel_wm_need_update(struct intel_plane_state *cur,
10865 struct intel_plane_state *new)
10866 {
10867 /* Update watermarks on tiling or size changes. */
10868 if (new->base.visible != cur->base.visible)
10869 return true;
10870
10871 if (!cur->base.fb || !new->base.fb)
10872 return false;
10873
10874 if (cur->base.fb->modifier != new->base.fb->modifier ||
10875 cur->base.rotation != new->base.rotation ||
10876 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10877 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10878 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10879 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10880 return true;
10881
10882 return false;
10883 }
10884
10885 static bool needs_scaling(const struct intel_plane_state *state)
10886 {
10887 int src_w = drm_rect_width(&state->base.src) >> 16;
10888 int src_h = drm_rect_height(&state->base.src) >> 16;
10889 int dst_w = drm_rect_width(&state->base.dst);
10890 int dst_h = drm_rect_height(&state->base.dst);
10891
10892 return (src_w != dst_w || src_h != dst_h);
10893 }
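/*
 * Note: plane source coordinates are in 16.16 fixed point, hence the
 * >> 16 above. E.g. a 1920x1080 source stored as (1920 << 16) x
 * (1080 << 16) mapped onto a 1920x1080 destination needs no scaler,
 * so needs_scaling() returns false.
 */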
10894
10895 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
10896 struct drm_crtc_state *crtc_state,
10897 const struct intel_plane_state *old_plane_state,
10898 struct drm_plane_state *plane_state)
10899 {
10900 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
10901 struct drm_crtc *crtc = crtc_state->crtc;
10902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10903 struct intel_plane *plane = to_intel_plane(plane_state->plane);
10904 struct drm_device *dev = crtc->dev;
10905 struct drm_i915_private *dev_priv = to_i915(dev);
10906 bool mode_changed = needs_modeset(crtc_state);
10907 bool was_crtc_enabled = old_crtc_state->base.active;
10908 bool is_crtc_enabled = crtc_state->active;
10909 bool turn_off, turn_on, visible, was_visible;
10910 struct drm_framebuffer *fb = plane_state->fb;
10911 int ret;
10912
10913 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
10914 ret = skl_update_scaler_plane(
10915 to_intel_crtc_state(crtc_state),
10916 to_intel_plane_state(plane_state));
10917 if (ret)
10918 return ret;
10919 }
10920
10921 was_visible = old_plane_state->base.visible;
10922 visible = plane_state->visible;
10923
10924 if (!was_crtc_enabled && WARN_ON(was_visible))
10925 was_visible = false;
10926
10927 /*
10928 * Visibility is calculated as if the crtc was on, but
10929 * after scaler setup everything depends on it being off
10930 * when the crtc isn't active.
10931 *
10932 * FIXME this is wrong for watermarks. Watermarks should also
10933 * be computed as if the pipe would be active. Perhaps move
10934 * per-plane wm computation to the .check_plane() hook, and
10935 * only combine the results from all planes in the current place?
10936 */
10937 if (!is_crtc_enabled) {
10938 plane_state->visible = visible = false;
10939 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
10940 }
10941
10942 if (!was_visible && !visible)
10943 return 0;
10944
10945 if (fb != old_plane_state->base.fb)
10946 pipe_config->fb_changed = true;
10947
10948 turn_off = was_visible && (!visible || mode_changed);
10949 turn_on = visible && (!was_visible || mode_changed);
10950
10951 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
10952 intel_crtc->base.base.id, intel_crtc->base.name,
10953 plane->base.base.id, plane->base.name,
10954 fb ? fb->base.id : -1);
10955
10956 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
10957 plane->base.base.id, plane->base.name,
10958 was_visible, visible,
10959 turn_off, turn_on, mode_changed);
10960
10961 if (turn_on) {
10962 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10963 pipe_config->update_wm_pre = true;
10964
10965 /* must disable cxsr around plane enable/disable */
10966 if (plane->id != PLANE_CURSOR)
10967 pipe_config->disable_cxsr = true;
10968 } else if (turn_off) {
10969 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10970 pipe_config->update_wm_post = true;
10971
10972 /* must disable cxsr around plane enable/disable */
10973 if (plane->id != PLANE_CURSOR)
10974 pipe_config->disable_cxsr = true;
10975 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
10976 to_intel_plane_state(plane_state))) {
10977 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
10978 /* FIXME bollocks */
10979 pipe_config->update_wm_pre = true;
10980 pipe_config->update_wm_post = true;
10981 }
10982 }
10983
10984 if (visible || was_visible)
10985 pipe_config->fb_bits |= plane->frontbuffer_bit;
10986
10987 /*
10988 * ILK/SNB DVSACNTR/Sprite Enable
10989 * IVB SPR_CTL/Sprite Enable
10990 * "When in Self Refresh Big FIFO mode, a write to enable the
10991 * plane will be internally buffered and delayed while Big FIFO
10992 * mode is exiting."
10993 *
10994 * Which means that enabling the sprite can take an extra frame
10995 * when we start in big FIFO mode (LP1+). Thus we need to drop
10996 * down to LP0 and wait for vblank in order to make sure the
10997 * sprite gets enabled on the next vblank after the register write.
10998 * Doing otherwise would risk enabling the sprite one frame after
10999 * we've already signalled flip completion. We can resume LP1+
11000 * once the sprite has been enabled.
11001 *
11003 * WaCxSRDisabledForSpriteScaling:ivb
11004 * IVB SPR_SCALE/Scaling Enable
11005 * "Low Power watermarks must be disabled for at least one
11006 * frame before enabling sprite scaling, and kept disabled
11007 * until sprite scaling is disabled."
11008 *
11009 * ILK/SNB DVSASCALE/Scaling Enable
11010 * "When in Self Refresh Big FIFO mode, scaling enable will be
11011 * masked off while Big FIFO mode is exiting."
11012 *
11013 * Despite the w/a only being listed for IVB we assume that
11014 * the ILK/SNB note has similar ramifications, hence we apply
11015 * the w/a on all three platforms.
11016 *
11017 * Experimental results suggest this is also needed for the primary
11018 * plane, not only the sprite plane.
11019 */
11020 if (plane->id != PLANE_CURSOR &&
11021 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11022 IS_IVYBRIDGE(dev_priv)) &&
11023 (turn_on || (!needs_scaling(old_plane_state) &&
11024 needs_scaling(to_intel_plane_state(plane_state)))))
11025 pipe_config->disable_lp_wm = true;
11026
11027 return 0;
11028 }
11029
11030 static bool encoders_cloneable(const struct intel_encoder *a,
11031 const struct intel_encoder *b)
11032 {
11033 /* masks could be asymmetric, so check both ways */
11034 return a == b || (a->cloneable & (1 << b->type) &&
11035 b->cloneable & (1 << a->type));
11036 }
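/*
 * Illustrative example: two distinct encoders may share a crtc only if
 * each one's cloneable bitmask contains the other's type, e.g.
 * a->cloneable having BIT(INTEL_OUTPUT_ANALOG) set while b is the
 * analog encoder, and vice versa.
 */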
11037
11038 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11039 struct intel_crtc *crtc,
11040 struct intel_encoder *encoder)
11041 {
11042 struct intel_encoder *source_encoder;
11043 struct drm_connector *connector;
11044 struct drm_connector_state *connector_state;
11045 int i;
11046
11047 for_each_new_connector_in_state(state, connector, connector_state, i) {
11048 if (connector_state->crtc != &crtc->base)
11049 continue;
11050
11051 source_encoder =
11052 to_intel_encoder(connector_state->best_encoder);
11053 if (!encoders_cloneable(encoder, source_encoder))
11054 return false;
11055 }
11056
11057 return true;
11058 }
11059
11060 static int icl_add_linked_planes(struct intel_atomic_state *state)
11061 {
11062 struct intel_plane *plane, *linked;
11063 struct intel_plane_state *plane_state, *linked_plane_state;
11064 int i;
11065
11066 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11067 linked = plane_state->linked_plane;
11068
11069 if (!linked)
11070 continue;
11071
11072 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11073 if (IS_ERR(linked_plane_state))
11074 return PTR_ERR(linked_plane_state);
11075
11076 WARN_ON(linked_plane_state->linked_plane != plane);
11077 WARN_ON(linked_plane_state->slave == plane_state->slave);
11078 }
11079
11080 return 0;
11081 }
11082
11083 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11084 {
11085 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11086 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11087 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11088 struct intel_plane *plane, *linked;
11089 struct intel_plane_state *plane_state;
11090 int i;
11091
11092 if (INTEL_GEN(dev_priv) < 11)
11093 return 0;
11094
11095 /*
11096 * Destroy all old plane links and make the slave plane invisible
11097 * in the crtc_state->active_planes mask.
11098 */
11099 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11100 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11101 continue;
11102
11103 plane_state->linked_plane = NULL;
11104 if (plane_state->slave && !plane_state->base.visible) {
11105 crtc_state->active_planes &= ~BIT(plane->id);
11106 crtc_state->update_planes |= BIT(plane->id);
11107 }
11108
11109 plane_state->slave = false;
11110 }
11111
11112 if (!crtc_state->nv12_planes)
11113 return 0;
11114
11115 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11116 struct intel_plane_state *linked_state = NULL;
11117
11118 if (plane->pipe != crtc->pipe ||
11119 !(crtc_state->nv12_planes & BIT(plane->id)))
11120 continue;
11121
11122 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11123 if (!icl_is_nv12_y_plane(linked->id))
11124 continue;
11125
11126 if (crtc_state->active_planes & BIT(linked->id))
11127 continue;
11128
11129 linked_state = intel_atomic_get_plane_state(state, linked);
11130 if (IS_ERR(linked_state))
11131 return PTR_ERR(linked_state);
11132
11133 break;
11134 }
11135
11136 if (!linked_state) {
11137 DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
11138 hweight8(crtc_state->nv12_planes));
11139
11140 return -EINVAL;
11141 }
11142
11143 plane_state->linked_plane = linked;
11144
11145 linked_state->slave = true;
11146 linked_state->linked_plane = plane;
11147 crtc_state->active_planes |= BIT(linked->id);
11148 crtc_state->update_planes |= BIT(linked->id);
11149 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11150 }
11151
11152 return 0;
11153 }
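/*
 * Illustrative example: on gen11, if a plane on this pipe scans out an
 * NV12 framebuffer, the loop above claims a currently unused Y-capable
 * plane on the same pipe as its slave, marks it active, and links the
 * two plane states so the Y and UV surfaces are programmed together;
 * -EINVAL is returned if no such plane is free.
 */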
11154
11155 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11156 struct drm_crtc_state *crtc_state)
11157 {
11158 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11159 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11160 struct intel_crtc_state *pipe_config =
11161 to_intel_crtc_state(crtc_state);
11162 int ret;
11163 bool mode_changed = needs_modeset(crtc_state);
11164
11165 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11166 mode_changed && !crtc_state->active)
11167 pipe_config->update_wm_post = true;
11168
11169 if (mode_changed && crtc_state->enable &&
11170 dev_priv->display.crtc_compute_clock &&
11171 !WARN_ON(pipe_config->shared_dpll)) {
11172 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11173 pipe_config);
11174 if (ret)
11175 return ret;
11176 }
11177
11178 if (mode_changed || crtc_state->color_mgmt_changed) {
11179 ret = intel_color_check(pipe_config);
11180 if (ret)
11181 return ret;
11182
11183 /*
11184 * Changing color management on Intel hardware is
11185 * handled as part of planes update.
11186 */
11187 crtc_state->planes_changed = true;
11188 }
11189
11190 ret = 0;
11191 if (dev_priv->display.compute_pipe_wm) {
11192 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11193 if (ret) {
11194 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11195 return ret;
11196 }
11197 }
11198
11199 if (dev_priv->display.compute_intermediate_wm) {
11200 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11201 return 0;
11202
11203 /*
11204 * Calculate 'intermediate' watermarks that satisfy both the
11205 * old state and the new state. We can program these
11206 * immediately.
11207 */
11208 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11209 if (ret) {
11210 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11211 return ret;
11212 }
11213 }
11214
11215 if (INTEL_GEN(dev_priv) >= 9) {
11216 if (mode_changed || pipe_config->update_pipe)
11217 ret = skl_update_scaler_crtc(pipe_config);
11218
11219 if (!ret)
11220 ret = icl_check_nv12_planes(pipe_config);
11221 if (!ret)
11222 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11223 pipe_config);
11224 if (!ret)
11225 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11226 pipe_config);
11227 }
11228
11229 if (HAS_IPS(dev_priv))
11230 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11231
11232 return ret;
11233 }
11234
11235 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11236 .atomic_check = intel_crtc_atomic_check,
11237 };
11238
11239 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11240 {
11241 struct intel_connector *connector;
11242 struct drm_connector_list_iter conn_iter;
11243
11244 drm_connector_list_iter_begin(dev, &conn_iter);
11245 for_each_intel_connector_iter(connector, &conn_iter) {
11246 if (connector->base.state->crtc)
11247 drm_connector_put(&connector->base);
11248
11249 if (connector->base.encoder) {
11250 connector->base.state->best_encoder =
11251 connector->base.encoder;
11252 connector->base.state->crtc =
11253 connector->base.encoder->crtc;
11254
11255 drm_connector_get(&connector->base);
11256 } else {
11257 connector->base.state->best_encoder = NULL;
11258 connector->base.state->crtc = NULL;
11259 }
11260 }
11261 drm_connector_list_iter_end(&conn_iter);
11262 }
11263
11264 static int
11265 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11266 struct intel_crtc_state *pipe_config)
11267 {
11268 struct drm_connector *connector = conn_state->connector;
11269 const struct drm_display_info *info = &connector->display_info;
11270 int bpp;
11271
11272 switch (conn_state->max_bpc) {
11273 case 6 ... 7:
11274 bpp = 6 * 3;
11275 break;
11276 case 8 ... 9:
11277 bpp = 8 * 3;
11278 break;
11279 case 10 ... 11:
11280 bpp = 10 * 3;
11281 break;
11282 case 12:
11283 bpp = 12 * 3;
11284 break;
11285 default:
11286 return -EINVAL;
11287 }
11288
11289 if (bpp < pipe_config->pipe_bpp) {
11290 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11291 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11292 connector->base.id, connector->name,
11293 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11294 pipe_config->pipe_bpp);
11295
11296 pipe_config->pipe_bpp = bpp;
11297 }
11298
11299 return 0;
11300 }
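/*
 * Worked example: a connector with max_bpc = 10 maps to 30 bpp above;
 * if the platform baseline was 36 bpp (12 bpc), the pipe gets clamped
 * down to 30 bpp, while a baseline of 24 bpp would be left alone.
 */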
11301
11302 static int
11303 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11304 struct intel_crtc_state *pipe_config)
11305 {
11306 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11307 struct drm_atomic_state *state = pipe_config->base.state;
11308 struct drm_connector *connector;
11309 struct drm_connector_state *connector_state;
11310 int bpp, i;
11311
11312 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11313 IS_CHERRYVIEW(dev_priv)))
11314 bpp = 10*3;
11315 else if (INTEL_GEN(dev_priv) >= 5)
11316 bpp = 12*3;
11317 else
11318 bpp = 8*3;
11319
11320 pipe_config->pipe_bpp = bpp;
11321
11322 /* Clamp display bpp to connector max bpp */
11323 for_each_new_connector_in_state(state, connector, connector_state, i) {
11324 int ret;
11325
11326 if (connector_state->crtc != &crtc->base)
11327 continue;
11328
11329 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11330 if (ret)
11331 return ret;
11332 }
11333
11334 return 0;
11335 }
11336
11337 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11338 {
11339 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11340 "type: 0x%x flags: 0x%x\n",
11341 mode->crtc_clock,
11342 mode->crtc_hdisplay, mode->crtc_hsync_start,
11343 mode->crtc_hsync_end, mode->crtc_htotal,
11344 mode->crtc_vdisplay, mode->crtc_vsync_start,
11345 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11346 }
11347
11348 static inline void
11349 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
11350 unsigned int lane_count, struct intel_link_m_n *m_n)
11351 {
11352 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11353 id, lane_count,
11354 m_n->gmch_m, m_n->gmch_n,
11355 m_n->link_m, m_n->link_n, m_n->tu);
11356 }
11357
11358 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11359
11360 static const char * const output_type_str[] = {
11361 OUTPUT_TYPE(UNUSED),
11362 OUTPUT_TYPE(ANALOG),
11363 OUTPUT_TYPE(DVO),
11364 OUTPUT_TYPE(SDVO),
11365 OUTPUT_TYPE(LVDS),
11366 OUTPUT_TYPE(TVOUT),
11367 OUTPUT_TYPE(HDMI),
11368 OUTPUT_TYPE(DP),
11369 OUTPUT_TYPE(EDP),
11370 OUTPUT_TYPE(DSI),
11371 OUTPUT_TYPE(DDI),
11372 OUTPUT_TYPE(DP_MST),
11373 };
11374
11375 #undef OUTPUT_TYPE
11376
11377 static void snprintf_output_types(char *buf, size_t len,
11378 unsigned int output_types)
11379 {
11380 char *str = buf;
11381 int i;
11382
11383 str[0] = '\0';
11384
11385 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11386 int r;
11387
11388 if ((output_types & BIT(i)) == 0)
11389 continue;
11390
11391 r = snprintf(str, len, "%s%s",
11392 str != buf ? "," : "", output_type_str[i]);
11393 if (r >= len)
11394 break;
11395 str += r;
11396 len -= r;
11397
11398 output_types &= ~BIT(i);
11399 }
11400
11401 WARN_ON_ONCE(output_types != 0);
11402 }
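/*
 * Illustrative example, assuming the INTEL_OUTPUT_* enum follows the
 * array order above: output_types = BIT(INTEL_OUTPUT_HDMI) |
 * BIT(INTEL_OUTPUT_DP) would render as "HDMI,DP".
 */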
11403
11404 static const char * const output_format_str[] = {
11405 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11406 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11407 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11408 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11409 };
11410
11411 static const char *output_formats(enum intel_output_format format)
11412 {
11413 if (format >= ARRAY_SIZE(output_format_str))
11414 format = INTEL_OUTPUT_FORMAT_INVALID;
11415 return output_format_str[format];
11416 }
11417
11418 static void intel_dump_pipe_config(struct intel_crtc *crtc,
11419 struct intel_crtc_state *pipe_config,
11420 const char *context)
11421 {
11422 struct drm_device *dev = crtc->base.dev;
11423 struct drm_i915_private *dev_priv = to_i915(dev);
11424 struct drm_plane *plane;
11425 struct intel_plane *intel_plane;
11426 struct intel_plane_state *state;
11427 struct drm_framebuffer *fb;
11428 char buf[64];
11429
11430 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
11431 crtc->base.base.id, crtc->base.name, context);
11432
11433 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11434 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
11435 buf, pipe_config->output_types);
11436
11437 DRM_DEBUG_KMS("output format: %s\n",
11438 output_formats(pipe_config->output_format));
11439
11440 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11441 transcoder_name(pipe_config->cpu_transcoder),
11442 pipe_config->pipe_bpp, pipe_config->dither);
11443
11444 if (pipe_config->has_pch_encoder)
11445 intel_dump_m_n_config(pipe_config, "fdi",
11446 pipe_config->fdi_lanes,
11447 &pipe_config->fdi_m_n);
11448
11449 if (intel_crtc_has_dp_encoder(pipe_config)) {
11450 intel_dump_m_n_config(pipe_config, "dp m_n",
11451 pipe_config->lane_count, &pipe_config->dp_m_n);
11452 if (pipe_config->has_drrs)
11453 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11454 pipe_config->lane_count,
11455 &pipe_config->dp_m2_n2);
11456 }
11457
11458 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
11459 pipe_config->has_audio, pipe_config->has_infoframe);
11460
11461 DRM_DEBUG_KMS("requested mode:\n");
11462 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11463 DRM_DEBUG_KMS("adjusted mode:\n");
11464 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11465 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11466 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11467 pipe_config->port_clock,
11468 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11469 pipe_config->pixel_rate);
11470
11471 if (INTEL_GEN(dev_priv) >= 9)
11472 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11473 crtc->num_scalers,
11474 pipe_config->scaler_state.scaler_users,
11475 pipe_config->scaler_state.scaler_id);
11476
11477 if (HAS_GMCH(dev_priv))
11478 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11479 pipe_config->gmch_pfit.control,
11480 pipe_config->gmch_pfit.pgm_ratios,
11481 pipe_config->gmch_pfit.lvds_border_bits);
11482 else
11483 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
11484 pipe_config->pch_pfit.pos,
11485 pipe_config->pch_pfit.size,
11486 enableddisabled(pipe_config->pch_pfit.enabled));
11487
11488 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11489 pipe_config->ips_enabled, pipe_config->double_wide);
11490
11491 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11492
11493 DRM_DEBUG_KMS("planes on this crtc\n");
11494 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
11495 struct drm_format_name_buf format_name;
11496 intel_plane = to_intel_plane(plane);
11497 if (intel_plane->pipe != crtc->pipe)
11498 continue;
11499
11500 state = to_intel_plane_state(plane->state);
11501 fb = state->base.fb;
11502 if (!fb) {
11503 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
11504 plane->base.id, plane->name, state->scaler_id);
11505 continue;
11506 }
11507
11508 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
11509 plane->base.id, plane->name,
11510 fb->base.id, fb->width, fb->height,
11511 drm_get_format_name(fb->format->format, &format_name));
11512 if (INTEL_GEN(dev_priv) >= 9)
11513 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
11514 state->scaler_id,
11515 state->base.src.x1 >> 16,
11516 state->base.src.y1 >> 16,
11517 drm_rect_width(&state->base.src) >> 16,
11518 drm_rect_height(&state->base.src) >> 16,
11519 state->base.dst.x1, state->base.dst.y1,
11520 drm_rect_width(&state->base.dst),
11521 drm_rect_height(&state->base.dst));
11522 }
11523 }
11524
11525 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11526 {
11527 struct drm_device *dev = state->dev;
11528 struct drm_connector *connector;
11529 struct drm_connector_list_iter conn_iter;
11530 unsigned int used_ports = 0;
11531 unsigned int used_mst_ports = 0;
11532 bool ret = true;
11533
11534 /*
11535 * Walk the connector list instead of the encoder
11536 * list to detect the problem on ddi platforms
11537 * where there's just one encoder per digital port.
11538 */
11539 drm_connector_list_iter_begin(dev, &conn_iter);
11540 drm_for_each_connector_iter(connector, &conn_iter) {
11541 struct drm_connector_state *connector_state;
11542 struct intel_encoder *encoder;
11543
11544 connector_state = drm_atomic_get_new_connector_state(state, connector);
11545 if (!connector_state)
11546 connector_state = connector->state;
11547
11548 if (!connector_state->best_encoder)
11549 continue;
11550
11551 encoder = to_intel_encoder(connector_state->best_encoder);
11552
11553 WARN_ON(!connector_state->crtc);
11554
11555 switch (encoder->type) {
11556 unsigned int port_mask;
11557 case INTEL_OUTPUT_DDI:
11558 if (WARN_ON(!HAS_DDI(to_i915(dev))))
11559 break;
11560 /* else: fall through */
11561 case INTEL_OUTPUT_DP:
11562 case INTEL_OUTPUT_HDMI:
11563 case INTEL_OUTPUT_EDP:
11564 port_mask = 1 << encoder->port;
11565
11566 /* the same port mustn't appear more than once */
11567 if (used_ports & port_mask)
11568 ret = false;
11569
11570 used_ports |= port_mask;
11571 break;
11572 case INTEL_OUTPUT_DP_MST:
11573 used_mst_ports |=
11574 1 << encoder->port;
11575 break;
11576 default:
11577 break;
11578 }
11579 }
11580 drm_connector_list_iter_end(&conn_iter);
11581
11582 /* can't mix MST and SST/HDMI on the same port */
11583 if (used_ports & used_mst_ports)
11584 return false;
11585
11586 return ret;
11587 }
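/*
 * Illustrative example: an SST DP connector and an MST stream both
 * routed to port B would set (1 << PORT_B) in used_ports and
 * used_mst_ports respectively; the overlap check above then rejects
 * the configuration, since a port cannot run MST and SST/HDMI at once.
 */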
11588
11589 static int
11590 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11591 {
11592 struct drm_i915_private *dev_priv =
11593 to_i915(crtc_state->base.crtc->dev);
11594 struct intel_crtc_state *saved_state;
11595
11596 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
11597 if (!saved_state)
11598 return -ENOMEM;
11599
11600 /* FIXME: before the switch to atomic started, a new pipe_config was
11601 * kzalloc'd. Code that depends on any field being zero should be
11602 * fixed, so that the crtc_state can be safely duplicated. For now,
11603 * only fields that are known not to cause problems are preserved. */
11604
11605 saved_state->scaler_state = crtc_state->scaler_state;
11606 saved_state->shared_dpll = crtc_state->shared_dpll;
11607 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
11608 saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
11609 saved_state->ips_force_disable = crtc_state->ips_force_disable;
11610 if (IS_G4X(dev_priv) ||
11611 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11612 saved_state->wm = crtc_state->wm;
11613
11614 /* Keep base drm_crtc_state intact, only clear our extended struct */
11615 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
11616 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
11617 sizeof(*crtc_state) - sizeof(crtc_state->base));
11618
11619 kfree(saved_state);
11620 return 0;
11621 }
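/*
 * Note on the memcpy() above: the BUILD_BUG_ON() proves that 'base' is
 * the first member of struct intel_crtc_state, so '&crtc_state->base + 1'
 * points at the first byte after the embedded drm_crtc_state. Everything
 * past it is overwritten with the saved (mostly zeroed) copy, while the
 * drm core's own state at the start of the struct stays intact.
 */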
11622
11623 static int
11624 intel_modeset_pipe_config(struct drm_crtc *crtc,
11625 struct intel_crtc_state *pipe_config)
11626 {
11627 struct drm_atomic_state *state = pipe_config->base.state;
11628 struct intel_encoder *encoder;
11629 struct drm_connector *connector;
11630 struct drm_connector_state *connector_state;
11631 int base_bpp, ret;
11632 int i;
11633 bool retry = true;
11634
11635 ret = clear_intel_crtc_state(pipe_config);
11636 if (ret)
11637 return ret;
11638
11639 pipe_config->cpu_transcoder =
11640 (enum transcoder) to_intel_crtc(crtc)->pipe;
11641
11642 /*
11643 * Sanitize sync polarity flags based on requested ones. If neither
11644 * positive nor negative polarity is requested, treat this as meaning
11645 * negative polarity.
11646 */
11647 if (!(pipe_config->base.adjusted_mode.flags &
11648 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
11649 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
11650
11651 if (!(pipe_config->base.adjusted_mode.flags &
11652 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
11653 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
11654
11655 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
11656 pipe_config);
11657 if (ret)
11658 return ret;
11659
11660 base_bpp = pipe_config->pipe_bpp;
11661
11662 /*
11663 * Determine the real pipe dimensions. Note that stereo modes can
11664 * increase the actual pipe size due to the frame doubling and
11665 * insertion of additional space for blanks between the frames. This
11666 * is stored in the crtc timings. We use the requested mode to do this
11667 * computation to clearly distinguish it from the adjusted mode, which
11668 * can be changed by the connectors in the below retry loop.
11669 */
11670 drm_mode_get_hv_timing(&pipe_config->base.mode,
11671 &pipe_config->pipe_src_w,
11672 &pipe_config->pipe_src_h);
11673
11674 for_each_new_connector_in_state(state, connector, connector_state, i) {
11675 if (connector_state->crtc != crtc)
11676 continue;
11677
11678 encoder = to_intel_encoder(connector_state->best_encoder);
11679
11680 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
11681 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11682 return -EINVAL;
11683 }
11684
11685 /*
11686 * Determine output_types before calling the .compute_config()
11687 * hooks so that the hooks can use this information safely.
11688 */
11689 if (encoder->compute_output_type)
11690 pipe_config->output_types |=
11691 BIT(encoder->compute_output_type(encoder, pipe_config,
11692 connector_state));
11693 else
11694 pipe_config->output_types |= BIT(encoder->type);
11695 }
11696
11697 encoder_retry:
11698 /* Ensure the port clock defaults are reset when retrying. */
11699 pipe_config->port_clock = 0;
11700 pipe_config->pixel_multiplier = 1;
11701
11702 /* Fill in default crtc timings, allow encoders to overwrite them. */
11703 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
11704 CRTC_STEREO_DOUBLE);
11705
11706 /* Pass our mode to the connectors and the CRTC to give them a chance to
11707 * adjust it according to limitations or connector properties, and also
11708 * a chance to reject the mode entirely.
11709 */
11710 for_each_new_connector_in_state(state, connector, connector_state, i) {
11711 if (connector_state->crtc != crtc)
11712 continue;
11713
11714 encoder = to_intel_encoder(connector_state->best_encoder);
11715 ret = encoder->compute_config(encoder, pipe_config,
11716 connector_state);
11717 if (ret < 0) {
11718 if (ret != -EDEADLK)
11719 DRM_DEBUG_KMS("Encoder config failure: %d\n",
11720 ret);
11721 return ret;
11722 }
11723 }
11724
11725 /* Set default port clock if not overwritten by the encoder. Needs to be
11726 * done afterwards in case the encoder adjusts the mode. */
11727 if (!pipe_config->port_clock)
11728 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
11729 * pipe_config->pixel_multiplier;
11730
11731 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
11732 if (ret == -EDEADLK)
11733 return ret;
11734 if (ret < 0) {
11735 DRM_DEBUG_KMS("CRTC fixup failed\n");
11736 return ret;
11737 }
11738
11739 if (ret == RETRY) {
11740 if (WARN(!retry, "loop in pipe configuration computation\n"))
11741 return -EINVAL;
11742
11743 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11744 retry = false;
11745 goto encoder_retry;
11746 }
11747
11748 /* Dithering seems not to pass bits through correctly when it should, so
11749 * only enable it on 6bpc panels and when it's not a compliance
11750 * test requesting a 6bpc video pattern.
11751 */
11752 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
11753 !pipe_config->dither_force_disable;
11754 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
11755 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
11756
11757 return 0;
11758 }
11759
11760 static bool intel_fuzzy_clock_check(int clock1, int clock2)
11761 {
11762 int diff;
11763
11764 if (clock1 == clock2)
11765 return true;
11766
11767 if (!clock1 || !clock2)
11768 return false;
11769
11770 diff = abs(clock1 - clock2);
11771
11772 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
11773 return true;
11774
11775 return false;
11776 }
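/*
 * Worked example: the check above passes while the difference stays
 * under 5% of (clock1 + clock2), i.e. roughly 10% of either clock.
 * For clock1 = 100000 and clock2 = 104000:
 *   ((4000 + 204000) * 100) / 204000 = 101 < 105, so they match.
 */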
11777
11778 static bool
11779 intel_compare_m_n(unsigned int m, unsigned int n,
11780 unsigned int m2, unsigned int n2,
11781 bool exact)
11782 {
11783 if (m == m2 && n == n2)
11784 return true;
11785
11786 if (exact || !m || !n || !m2 || !n2)
11787 return false;
11788
11789 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11790
11791 if (n > n2) {
11792 while (n > n2) {
11793 m2 <<= 1;
11794 n2 <<= 1;
11795 }
11796 } else if (n < n2) {
11797 while (n < n2) {
11798 m <<= 1;
11799 n <<= 1;
11800 }
11801 }
11802
11803 if (n != n2)
11804 return false;
11805
11806 return intel_fuzzy_clock_check(m, m2);
11807 }
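/*
 * Worked example of the inexact path: comparing m/n = 2/5 against
 * m2/n2 = 4/10, the ratio with the smaller denominator is shifted up
 * (2/5 -> 4/10) until the denominators match; the numerators 4 and 4
 * then pass the fuzzy clock check, so the two ratios compare as equal.
 */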
11808
11809 static bool
11810 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11811 struct intel_link_m_n *m2_n2,
11812 bool adjust)
11813 {
11814 if (m_n->tu == m2_n2->tu &&
11815 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11816 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11817 intel_compare_m_n(m_n->link_m, m_n->link_n,
11818 m2_n2->link_m, m2_n2->link_n, !adjust)) {
11819 if (adjust)
11820 *m2_n2 = *m_n;
11821
11822 return true;
11823 }
11824
11825 return false;
11826 }
11827
11828 static void __printf(3, 4)
11829 pipe_config_err(bool adjust, const char *name, const char *format, ...)
11830 {
11831 struct va_format vaf;
11832 va_list args;
11833
11834 va_start(args, format);
11835 vaf.fmt = format;
11836 vaf.va = &args;
11837
11838 if (adjust)
11839 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11840 else
11841 drm_err("mismatch in %s %pV", name, &vaf);
11842
11843 va_end(args);
11844 }
11845
11846 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
11847 {
11848 if (i915_modparams.fastboot != -1)
11849 return i915_modparams.fastboot;
11850
11851 /* Enable fastboot by default on Skylake and newer */
11852 if (INTEL_GEN(dev_priv) >= 9)
11853 return true;
11854
11855 /* Enable fastboot by default on VLV and CHV */
11856 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11857 return true;
11858
11859 /* Disabled by default on all others */
11860 return false;
11861 }
11862
11863 static bool
11864 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11865 struct intel_crtc_state *current_config,
11866 struct intel_crtc_state *pipe_config,
11867 bool adjust)
11868 {
11869 bool ret = true;
11870 bool fixup_inherited = adjust &&
11871 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11872 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11873
11874 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
11875 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
11876 ret = false;
11877 }
11878
11879 #define PIPE_CONF_CHECK_X(name) do { \
11880 if (current_config->name != pipe_config->name) { \
11881 pipe_config_err(adjust, __stringify(name), \
11882 "(expected 0x%08x, found 0x%08x)\n", \
11883 current_config->name, \
11884 pipe_config->name); \
11885 ret = false; \
11886 } \
11887 } while (0)
11888
11889 #define PIPE_CONF_CHECK_I(name) do { \
11890 if (current_config->name != pipe_config->name) { \
11891 pipe_config_err(adjust, __stringify(name), \
11892 "(expected %i, found %i)\n", \
11893 current_config->name, \
11894 pipe_config->name); \
11895 ret = false; \
11896 } \
11897 } while (0)
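/*
 * Illustrative expansion: PIPE_CONF_CHECK_I(pipe_bpp) compares
 * current_config->pipe_bpp with pipe_config->pipe_bpp and, on mismatch,
 * logs "mismatch in pipe_bpp (expected %i, found %i)" and forces
 * ret = false; the adjust flag only selects debug vs. error severity
 * in pipe_config_err().
 */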
11898
11899 #define PIPE_CONF_CHECK_BOOL(name) do { \
11900 if (current_config->name != pipe_config->name) { \
11901 pipe_config_err(adjust, __stringify(name), \
11902 "(expected %s, found %s)\n", \
11903 yesno(current_config->name), \
11904 yesno(pipe_config->name)); \
11905 ret = false; \
11906 } \
11907 } while (0)
11908
11909 /*
11910 * Checks state where we only read out whether a feature is enabled, but
11911 * not the entire state itself (like full infoframes or ELD for audio).
11912 * These states require a full modeset on bootup to fix up.
11913 */
11914 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11915 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11916 PIPE_CONF_CHECK_BOOL(name); \
11917 } else { \
11918 pipe_config_err(adjust, __stringify(name), \
11919 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11920 yesno(current_config->name), \
11921 yesno(pipe_config->name)); \
11922 ret = false; \
11923 } \
11924 } while (0)
11925
11926 #define PIPE_CONF_CHECK_P(name) do { \
11927 if (current_config->name != pipe_config->name) { \
11928 pipe_config_err(adjust, __stringify(name), \
11929 "(expected %p, found %p)\n", \
11930 current_config->name, \
11931 pipe_config->name); \
11932 ret = false; \
11933 } \
11934 } while (0)
11935
11936 #define PIPE_CONF_CHECK_M_N(name) do { \
11937 if (!intel_compare_link_m_n(&current_config->name, \
11938 &pipe_config->name,\
11939 adjust)) { \
11940 pipe_config_err(adjust, __stringify(name), \
11941 "(expected tu %i gmch %i/%i link %i/%i, " \
11942 "found tu %i, gmch %i/%i link %i/%i)\n", \
11943 current_config->name.tu, \
11944 current_config->name.gmch_m, \
11945 current_config->name.gmch_n, \
11946 current_config->name.link_m, \
11947 current_config->name.link_n, \
11948 pipe_config->name.tu, \
11949 pipe_config->name.gmch_m, \
11950 pipe_config->name.gmch_n, \
11951 pipe_config->name.link_m, \
11952 pipe_config->name.link_n); \
11953 ret = false; \
11954 } \
11955 } while (0)
11956
11957 /* This is required for BDW+ where there is only one set of registers for
11958 * switching between high and low RR.
11959 * This macro can be used whenever a comparison has to be made between one
11960 * hw state and multiple sw state variables.
11961 */
11962 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11963 if (!intel_compare_link_m_n(&current_config->name, \
11964 &pipe_config->name, adjust) && \
11965 !intel_compare_link_m_n(&current_config->alt_name, \
11966 &pipe_config->name, adjust)) { \
11967 pipe_config_err(adjust, __stringify(name), \
11968 "(expected tu %i gmch %i/%i link %i/%i, " \
11969 "or tu %i gmch %i/%i link %i/%i, " \
11970 "found tu %i, gmch %i/%i link %i/%i)\n", \
11971 current_config->name.tu, \
11972 current_config->name.gmch_m, \
11973 current_config->name.gmch_n, \
11974 current_config->name.link_m, \
11975 current_config->name.link_n, \
11976 current_config->alt_name.tu, \
11977 current_config->alt_name.gmch_m, \
11978 current_config->alt_name.gmch_n, \
11979 current_config->alt_name.link_m, \
11980 current_config->alt_name.link_n, \
11981 pipe_config->name.tu, \
11982 pipe_config->name.gmch_m, \
11983 pipe_config->name.gmch_n, \
11984 pipe_config->name.link_m, \
11985 pipe_config->name.link_n); \
11986 ret = false; \
11987 } \
11988 } while (0)
11989
11990 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11991 if ((current_config->name ^ pipe_config->name) & (mask)) { \
11992 pipe_config_err(adjust, __stringify(name), \
11993 "(%x) (expected %i, found %i)\n", \
11994 (mask), \
11995 current_config->name & (mask), \
11996 pipe_config->name & (mask)); \
11997 ret = false; \
11998 } \
11999 } while (0)
12000
12001 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12002 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12003 pipe_config_err(adjust, __stringify(name), \
12004 "(expected %i, found %i)\n", \
12005 current_config->name, \
12006 pipe_config->name); \
12007 ret = false; \
12008 } \
12009 } while (0)
12010
12011 #define PIPE_CONF_QUIRK(quirk) \
12012 ((current_config->quirks | pipe_config->quirks) & (quirk))
12013
12014 PIPE_CONF_CHECK_I(cpu_transcoder);
12015
12016 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12017 PIPE_CONF_CHECK_I(fdi_lanes);
12018 PIPE_CONF_CHECK_M_N(fdi_m_n);
12019
12020 PIPE_CONF_CHECK_I(lane_count);
12021 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12022
12023 if (INTEL_GEN(dev_priv) < 8) {
12024 PIPE_CONF_CHECK_M_N(dp_m_n);
12025
12026 if (current_config->has_drrs)
12027 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12028 } else
12029 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12030
12031 PIPE_CONF_CHECK_X(output_types);
12032
12033 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12034 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12035 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12036 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12037 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12038 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12039
12040 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12041 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12042 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12043 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12044 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12045 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12046
12047 PIPE_CONF_CHECK_I(pixel_multiplier);
12048 PIPE_CONF_CHECK_I(output_format);
12049 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12050 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12051 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12052 PIPE_CONF_CHECK_BOOL(limited_color_range);
12053
12054 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12055 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12056 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12057
12058 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12059
12060 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12061 DRM_MODE_FLAG_INTERLACE);
12062
12063 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12064 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12065 DRM_MODE_FLAG_PHSYNC);
12066 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12067 DRM_MODE_FLAG_NHSYNC);
12068 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12069 DRM_MODE_FLAG_PVSYNC);
12070 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12071 DRM_MODE_FLAG_NVSYNC);
12072 }
12073
12074 PIPE_CONF_CHECK_X(gmch_pfit.control);
12075 /* pfit ratios are autocomputed by the hw on gen4+ */
12076 if (INTEL_GEN(dev_priv) < 4)
12077 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12078 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12079
12080 if (!adjust) {
12081 PIPE_CONF_CHECK_I(pipe_src_w);
12082 PIPE_CONF_CHECK_I(pipe_src_h);
12083
12084 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12085 if (current_config->pch_pfit.enabled) {
12086 PIPE_CONF_CHECK_X(pch_pfit.pos);
12087 PIPE_CONF_CHECK_X(pch_pfit.size);
12088 }
12089
12090 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12091 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12092 }
12093
12094 PIPE_CONF_CHECK_BOOL(double_wide);
12095
12096 PIPE_CONF_CHECK_P(shared_dpll);
12097 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12098 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12099 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12100 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12101 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12102 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12103 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12104 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12105 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12106 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12107 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12108 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12109 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12110 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12111 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12112 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12113 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12114 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12115 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12116 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12117 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12118 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12119 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12120 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12121 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12122 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12123 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12124 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12125 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12126 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12127 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12128
12129 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12130 PIPE_CONF_CHECK_X(dsi_pll.div);
12131
12132 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12133 PIPE_CONF_CHECK_I(pipe_bpp);
12134
12135 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12136 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12137
12138 PIPE_CONF_CHECK_I(min_voltage_level);
12139
12140 #undef PIPE_CONF_CHECK_X
12141 #undef PIPE_CONF_CHECK_I
12142 #undef PIPE_CONF_CHECK_BOOL
12143 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12144 #undef PIPE_CONF_CHECK_P
12145 #undef PIPE_CONF_CHECK_FLAGS
12146 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12147 #undef PIPE_CONF_QUIRK
12148
12149 return ret;
12150 }
12151
12152 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12153 const struct intel_crtc_state *pipe_config)
12154 {
12155 if (pipe_config->has_pch_encoder) {
12156 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12157 &pipe_config->fdi_m_n);
12158 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12159
12160 /*
12161 * FDI already provided one idea for the dotclock.
12162 * Yell if the encoder disagrees.
12163 */
12164 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12165 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12166 fdi_dotclock, dotclock);
12167 }
12168 }
12169
12170 static void verify_wm_state(struct drm_crtc *crtc,
12171 struct drm_crtc_state *new_state)
12172 {
12173 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12174 struct skl_ddb_allocation hw_ddb, *sw_ddb;
12175 struct skl_pipe_wm hw_wm, *sw_wm;
12176 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12177 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12178 struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
12179 struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
12180 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12181 const enum pipe pipe = intel_crtc->pipe;
12182 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12183
12184 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12185 return;
12186
12187 skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
12188 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12189
12190 skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
12191
12192 skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12193 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12194
12195 if (INTEL_GEN(dev_priv) >= 11)
12196 if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
12197 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12198 sw_ddb->enabled_slices,
12199 hw_ddb.enabled_slices);
12200 /* planes */
12201 for_each_universal_plane(dev_priv, pipe, plane) {
12202 hw_plane_wm = &hw_wm.planes[plane];
12203 sw_plane_wm = &sw_wm->planes[plane];
12204
12205 /* Watermarks */
12206 for (level = 0; level <= max_level; level++) {
12207 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12208 &sw_plane_wm->wm[level]))
12209 continue;
12210
12211 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12212 pipe_name(pipe), plane + 1, level,
12213 sw_plane_wm->wm[level].plane_en,
12214 sw_plane_wm->wm[level].plane_res_b,
12215 sw_plane_wm->wm[level].plane_res_l,
12216 hw_plane_wm->wm[level].plane_en,
12217 hw_plane_wm->wm[level].plane_res_b,
12218 hw_plane_wm->wm[level].plane_res_l);
12219 }
12220
12221 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12222 &sw_plane_wm->trans_wm)) {
12223 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12224 pipe_name(pipe), plane + 1,
12225 sw_plane_wm->trans_wm.plane_en,
12226 sw_plane_wm->trans_wm.plane_res_b,
12227 sw_plane_wm->trans_wm.plane_res_l,
12228 hw_plane_wm->trans_wm.plane_en,
12229 hw_plane_wm->trans_wm.plane_res_b,
12230 hw_plane_wm->trans_wm.plane_res_l);
12231 }
12232
12233 /* DDB */
12234 hw_ddb_entry = &hw_ddb_y[plane];
12235 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12236
12237 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12238 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12239 pipe_name(pipe), plane + 1,
12240 sw_ddb_entry->start, sw_ddb_entry->end,
12241 hw_ddb_entry->start, hw_ddb_entry->end);
12242 }
12243 }
12244
12245 /*
12246 * cursor
12247 * If the cursor plane isn't active, we may not have updated its ddb
12248 * allocation. In that case, since the ddb allocation will be updated
12249 * once the plane becomes visible, we can skip this check.
12250 */
12251 if (1) {
12252 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
12253 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12254
12255 /* Watermarks */
12256 for (level = 0; level <= max_level; level++) {
12257 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12258 &sw_plane_wm->wm[level]))
12259 continue;
12260
12261 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12262 pipe_name(pipe), level,
12263 sw_plane_wm->wm[level].plane_en,
12264 sw_plane_wm->wm[level].plane_res_b,
12265 sw_plane_wm->wm[level].plane_res_l,
12266 hw_plane_wm->wm[level].plane_en,
12267 hw_plane_wm->wm[level].plane_res_b,
12268 hw_plane_wm->wm[level].plane_res_l);
12269 }
12270
12271 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12272 &sw_plane_wm->trans_wm)) {
12273 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12274 pipe_name(pipe),
12275 sw_plane_wm->trans_wm.plane_en,
12276 sw_plane_wm->trans_wm.plane_res_b,
12277 sw_plane_wm->trans_wm.plane_res_l,
12278 hw_plane_wm->trans_wm.plane_en,
12279 hw_plane_wm->trans_wm.plane_res_b,
12280 hw_plane_wm->trans_wm.plane_res_l);
12281 }
12282
12283 /* DDB */
12284 hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
12285 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12286
12287 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12288 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12289 pipe_name(pipe),
12290 sw_ddb_entry->start, sw_ddb_entry->end,
12291 hw_ddb_entry->start, hw_ddb_entry->end);
12292 }
12293 }
12294 }
12295
12296 static void
12297 verify_connector_state(struct drm_device *dev,
12298 struct drm_atomic_state *state,
12299 struct drm_crtc *crtc)
12300 {
12301 struct drm_connector *connector;
12302 struct drm_connector_state *new_conn_state;
12303 int i;
12304
12305 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12306 struct drm_encoder *encoder = connector->encoder;
12307 struct drm_crtc_state *crtc_state = NULL;
12308
12309 if (new_conn_state->crtc != crtc)
12310 continue;
12311
12312 if (crtc)
12313 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12314
12315 intel_connector_verify_state(crtc_state, new_conn_state);
12316
12317 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12318 "connector's atomic encoder doesn't match legacy encoder\n");
12319 }
12320 }
12321
12322 static void
12323 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12324 {
12325 struct intel_encoder *encoder;
12326 struct drm_connector *connector;
12327 struct drm_connector_state *old_conn_state, *new_conn_state;
12328 int i;
12329
12330 for_each_intel_encoder(dev, encoder) {
12331 bool enabled = false, found = false;
12332 enum pipe pipe;
12333
12334 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12335 encoder->base.base.id,
12336 encoder->base.name);
12337
12338 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12339 new_conn_state, i) {
12340 if (old_conn_state->best_encoder == &encoder->base)
12341 found = true;
12342
12343 if (new_conn_state->best_encoder != &encoder->base)
12344 continue;
12345 found = enabled = true;
12346
12347 I915_STATE_WARN(new_conn_state->crtc !=
12348 encoder->base.crtc,
12349 "connector's crtc doesn't match encoder crtc\n");
12350 }
12351
12352 if (!found)
12353 continue;
12354
12355 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12356 "encoder's enabled state mismatch "
12357 "(expected %i, found %i)\n",
12358 !!encoder->base.crtc, enabled);
12359
12360 if (!encoder->base.crtc) {
12361 bool active;
12362
12363 active = encoder->get_hw_state(encoder, &pipe);
12364 I915_STATE_WARN(active,
12365 "encoder detached but still enabled on pipe %c.\n",
12366 pipe_name(pipe));
12367 }
12368 }
12369 }
12370
12371 static void
12372 verify_crtc_state(struct drm_crtc *crtc,
12373 struct drm_crtc_state *old_crtc_state,
12374 struct drm_crtc_state *new_crtc_state)
12375 {
12376 struct drm_device *dev = crtc->dev;
12377 struct drm_i915_private *dev_priv = to_i915(dev);
12378 struct intel_encoder *encoder;
12379 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12380 struct intel_crtc_state *pipe_config, *sw_config;
12381 struct drm_atomic_state *old_state;
12382 bool active;
12383
12384 old_state = old_crtc_state->state;
12385 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12386 pipe_config = to_intel_crtc_state(old_crtc_state);
12387 memset(pipe_config, 0, sizeof(*pipe_config));
12388 pipe_config->base.crtc = crtc;
12389 pipe_config->base.state = old_state;
12390
12391 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12392
12393 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12394
12395 /* we keep both pipes enabled on 830 */
12396 if (IS_I830(dev_priv))
12397 active = new_crtc_state->active;
12398
12399 I915_STATE_WARN(new_crtc_state->active != active,
12400 "crtc active state doesn't match with hw state "
12401 "(expected %i, found %i)\n", new_crtc_state->active, active);
12402
12403 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12404 "transitional active state does not match atomic hw state "
12405 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12406
12407 for_each_encoder_on_crtc(dev, crtc, encoder) {
12408 enum pipe pipe;
12409
12410 active = encoder->get_hw_state(encoder, &pipe);
12411 I915_STATE_WARN(active != new_crtc_state->active,
12412 "[ENCODER:%i] active %i with crtc active %i\n",
12413 encoder->base.base.id, active, new_crtc_state->active);
12414
12415 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12416 "Encoder connected to wrong pipe %c\n",
12417 pipe_name(pipe));
12418
12419 if (active)
12420 encoder->get_config(encoder, pipe_config);
12421 }
12422
12423 intel_crtc_compute_pixel_rate(pipe_config);
12424
12425 if (!new_crtc_state->active)
12426 return;
12427
12428 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12429
12430 sw_config = to_intel_crtc_state(new_crtc_state);
12431 if (!intel_pipe_config_compare(dev_priv, sw_config,
12432 pipe_config, false)) {
12433 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12434 intel_dump_pipe_config(intel_crtc, pipe_config,
12435 "[hw state]");
12436 intel_dump_pipe_config(intel_crtc, sw_config,
12437 "[sw state]");
12438 }
12439 }
12440
12441 static void
12442 intel_verify_planes(struct intel_atomic_state *state)
12443 {
12444 struct intel_plane *plane;
12445 const struct intel_plane_state *plane_state;
12446 int i;
12447
12448 for_each_new_intel_plane_in_state(state, plane,
12449 plane_state, i)
12450 assert_plane(plane, plane_state->base.visible);
12451 }
12452
12453 static void
12454 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12455 struct intel_shared_dpll *pll,
12456 struct drm_crtc *crtc,
12457 struct drm_crtc_state *new_state)
12458 {
12459 struct intel_dpll_hw_state dpll_hw_state;
12460 unsigned int crtc_mask;
12461 bool active;
12462
12463 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12464
12465 DRM_DEBUG_KMS("%s\n", pll->info->name);
12466
12467 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12468
12469 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12470 I915_STATE_WARN(!pll->on && pll->active_mask,
12471 "pll in active use but not on in sw tracking\n");
12472 I915_STATE_WARN(pll->on && !pll->active_mask,
12473 "pll is on but not used by any active crtc\n");
12474 I915_STATE_WARN(pll->on != active,
12475 "pll on state mismatch (expected %i, found %i)\n",
12476 pll->on, active);
12477 }
12478
12479 if (!crtc) {
12480 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12481 "more active pll users than references: %x vs %x\n",
12482 pll->active_mask, pll->state.crtc_mask);
12483
12484 return;
12485 }
12486
12487 crtc_mask = drm_crtc_mask(crtc);
12488
12489 if (new_state->active)
12490 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12491 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12492 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12493 else
12494 I915_STATE_WARN(pll->active_mask & crtc_mask,
12495 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12496 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12497
12498 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12499 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12500 crtc_mask, pll->state.crtc_mask);
12501
12502 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12503 &dpll_hw_state,
12504 sizeof(dpll_hw_state)),
12505 "pll hw state mismatch\n");
12506 }
12507
12508 static void
12509 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12510 struct drm_crtc_state *old_crtc_state,
12511 struct drm_crtc_state *new_crtc_state)
12512 {
12513 struct drm_i915_private *dev_priv = to_i915(dev);
12514 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12515 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12516
12517 if (new_state->shared_dpll)
12518 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12519
12520 if (old_state->shared_dpll &&
12521 old_state->shared_dpll != new_state->shared_dpll) {
12522 unsigned int crtc_mask = drm_crtc_mask(crtc);
12523 struct intel_shared_dpll *pll = old_state->shared_dpll;
12524
12525 I915_STATE_WARN(pll->active_mask & crtc_mask,
12526 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12527 pipe_name(drm_crtc_index(crtc)));
12528 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12529 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12530 pipe_name(drm_crtc_index(crtc)));
12531 }
12532 }
12533
12534 static void
12535 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12536 struct drm_atomic_state *state,
12537 struct drm_crtc_state *old_state,
12538 struct drm_crtc_state *new_state)
12539 {
12540 if (!needs_modeset(new_state) &&
12541 !to_intel_crtc_state(new_state)->update_pipe)
12542 return;
12543
12544 verify_wm_state(crtc, new_state);
12545 verify_connector_state(crtc->dev, state, crtc);
12546 verify_crtc_state(crtc, old_state, new_state);
12547 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12548 }
12549
12550 static void
12551 verify_disabled_dpll_state(struct drm_device *dev)
12552 {
12553 struct drm_i915_private *dev_priv = to_i915(dev);
12554 int i;
12555
12556 for (i = 0; i < dev_priv->num_shared_dpll; i++)
12557 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12558 }
12559
12560 static void
12561 intel_modeset_verify_disabled(struct drm_device *dev,
12562 struct drm_atomic_state *state)
12563 {
12564 verify_encoder_state(dev, state);
12565 verify_connector_state(dev, state, NULL);
12566 verify_disabled_dpll_state(dev);
12567 }
12568
12569 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12570 {
12571 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12572 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12573
12574 /*
12575 * The scanline counter increments at the leading edge of hsync.
12576 *
12577 * On most platforms it starts counting from vtotal-1 on the
12578 * first active line. That means the scanline counter value is
12579 * always one less than what we would expect. I.e. just after
12580 * start of vblank, which also occurs at start of hsync (on the
12581 * last active line), the scanline counter will read vblank_start-1.
12582 *
12583 * On gen2 the scanline counter starts counting from 1 instead
12584 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12585 * to keep the value positive), instead of adding one.
12586 *
12587 * On HSW+ the behaviour of the scanline counter depends on the output
12588 * type. For DP ports it behaves like most other platforms, but on HDMI
12589 * there's an extra 1 line difference. So we need to add two instead of
12590 * one to the value.
12591 *
12592 * On VLV/CHV DSI the scanline counter would appear to increment
12593 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12594 * that means we can't tell whether we're in vblank or not while
12595 * we're on that particular line. We must still set scanline_offset
12596 * to 1 so that the vblank timestamps come out correct when we query
12597 * the scanline counter from within the vblank interrupt handler.
12598 * However if queried just before the start of vblank we'll get an
12599 * answer that's slightly in the future.
12600 */
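/*
 * In other words (illustrative): the real scanline is recovered as
 * (counter + scanline_offset) % vtotal. On most platforms the counter
 * reads vtotal-1 on the first active line, so an offset of 1 wraps it
 * to 0; on gen2 the counter reads 1 there, so vtotal-1 does the same.
 */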
12601 if (IS_GEN(dev_priv, 2)) {
12602 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12603 int vtotal;
12604
12605 vtotal = adjusted_mode->crtc_vtotal;
12606 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12607 vtotal /= 2;
12608
12609 crtc->scanline_offset = vtotal - 1;
12610 } else if (HAS_DDI(dev_priv) &&
12611 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12612 crtc->scanline_offset = 2;
12613 } else
12614 crtc->scanline_offset = 1;
12615 }
12616
12617 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12618 {
12619 struct drm_device *dev = state->dev;
12620 struct drm_i915_private *dev_priv = to_i915(dev);
12621 struct drm_crtc *crtc;
12622 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12623 int i;
12624
12625 if (!dev_priv->display.crtc_compute_clock)
12626 return;
12627
12628 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12629 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12630 struct intel_shared_dpll *old_dpll =
12631 to_intel_crtc_state(old_crtc_state)->shared_dpll;
12632
12633 if (!needs_modeset(new_crtc_state))
12634 continue;
12635
12636 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12637
12638 if (!old_dpll)
12639 continue;
12640
12641 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12642 }
12643 }
12644
12645 /*
12646 * This implements the workaround described in the "notes" section of the mode
12647 * set sequence documentation. When going from no pipes or single pipe to
12648 * multiple pipes, and planes are enabled after the pipe, we need to wait at
12649 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12650 */
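/*
 * The bookkeeping below records which pipe to wait on: if exactly one
 * pipe is already running, it is stored on the first newly enabled crtc;
 * if none are, the first newly enabled pipe is stored on the second one.
 */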
12651 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12652 {
12653 struct drm_crtc_state *crtc_state;
12654 struct intel_crtc *intel_crtc;
12655 struct drm_crtc *crtc;
12656 struct intel_crtc_state *first_crtc_state = NULL;
12657 struct intel_crtc_state *other_crtc_state = NULL;
12658 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12659 int i;
12660
12661 /* look at all crtc's that are going to be enabled during the modeset */
12662 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12663 intel_crtc = to_intel_crtc(crtc);
12664
12665 if (!crtc_state->active || !needs_modeset(crtc_state))
12666 continue;
12667
12668 if (first_crtc_state) {
12669 other_crtc_state = to_intel_crtc_state(crtc_state);
12670 break;
12671 } else {
12672 first_crtc_state = to_intel_crtc_state(crtc_state);
12673 first_pipe = intel_crtc->pipe;
12674 }
12675 }
12676
12677 /* No workaround needed? */
12678 if (!first_crtc_state)
12679 return 0;
12680
12681 /* w/a possibly needed, check how many crtc's are already enabled. */
12682 for_each_intel_crtc(state->dev, intel_crtc) {
12683 struct intel_crtc_state *pipe_config;
12684
12685 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12686 if (IS_ERR(pipe_config))
12687 return PTR_ERR(pipe_config);
12688
12689 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12690
12691 if (!pipe_config->base.active ||
12692 needs_modeset(&pipe_config->base))
12693 continue;
12694
12695 /* 2 or more enabled crtcs means no need for w/a */
12696 if (enabled_pipe != INVALID_PIPE)
12697 return 0;
12698
12699 enabled_pipe = intel_crtc->pipe;
12700 }
12701
12702 if (enabled_pipe != INVALID_PIPE)
12703 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12704 else if (other_crtc_state)
12705 other_crtc_state->hsw_workaround_pipe = first_pipe;
12706
12707 return 0;
12708 }
12709
12710 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12711 {
12712 struct drm_crtc *crtc;
12713
12714 /* Add all pipes to the state */
12715 for_each_crtc(state->dev, crtc) {
12716 struct drm_crtc_state *crtc_state;
12717
12718 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12719 if (IS_ERR(crtc_state))
12720 return PTR_ERR(crtc_state);
12721 }
12722
12723 return 0;
12724 }
12725
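/*
 * Unlike intel_lock_all_pipes() above, which only pulls each crtc into
 * the state (thereby acquiring its lock), the helper below also forces a
 * modeset on every active pipe and drags in its connectors and planes.
 */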
12726 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12727 {
12728 struct drm_crtc *crtc;
12729
12730 /*
12731 * Add all pipes to the state, and force
12732 * a modeset on all the active ones.
12733 */
12734 for_each_crtc(state->dev, crtc) {
12735 struct drm_crtc_state *crtc_state;
12736 int ret;
12737
12738 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12739 if (IS_ERR(crtc_state))
12740 return PTR_ERR(crtc_state);
12741
12742 if (!crtc_state->active || needs_modeset(crtc_state))
12743 continue;
12744
12745 crtc_state->mode_changed = true;
12746
12747 ret = drm_atomic_add_affected_connectors(state, crtc);
12748 if (ret)
12749 return ret;
12750
12751 ret = drm_atomic_add_affected_planes(state, crtc);
12752 if (ret)
12753 return ret;
12754 }
12755
12756 return 0;
12757 }
12758
12759 static int intel_modeset_checks(struct drm_atomic_state *state)
12760 {
12761 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12762 struct drm_i915_private *dev_priv = to_i915(state->dev);
12763 struct drm_crtc *crtc;
12764 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12765 int ret = 0, i;
12766
12767 if (!check_digital_port_conflicts(state)) {
12768 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
12769 return -EINVAL;
12770 }
12771
12772 intel_state->modeset = true;
12773 intel_state->active_crtcs = dev_priv->active_crtcs;
12774 intel_state->cdclk.logical = dev_priv->cdclk.logical;
12775 intel_state->cdclk.actual = dev_priv->cdclk.actual;
12776
12777 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12778 if (new_crtc_state->active)
12779 intel_state->active_crtcs |= 1 << i;
12780 else
12781 intel_state->active_crtcs &= ~(1 << i);
12782
12783 if (old_crtc_state->active != new_crtc_state->active)
12784 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
12785 }
12786
12787 /*
12788 * See if the config requires any additional preparation, e.g.
12789 * to adjust global state with pipes off. We need to do this
12790 * here so we can get the modeset_pipe updated config for the new
12791 * mode set on this crtc. For other crtcs we need to use the
12792 * adjusted_mode bits in the crtc directly.
12793 */
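/*
 * modeset_calc_cdclk() fills in both a "logical" and an "actual" cdclk:
 * a change to the logical value only requires taking every crtc lock,
 * whereas a change to the actual hardware cdclk forces a modeset on all
 * active pipes, as handled below.
 */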
12794 if (dev_priv->display.modeset_calc_cdclk) {
12795 ret = dev_priv->display.modeset_calc_cdclk(state);
12796 if (ret < 0)
12797 return ret;
12798
12799 /*
12800 * Writes to dev_priv->cdclk.logical must be protected by
12801 * holding all the crtc locks, even if we don't end up
12802 * touching the hardware
12803 */
12804 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
12805 &intel_state->cdclk.logical)) {
12806 ret = intel_lock_all_pipes(state);
12807 if (ret < 0)
12808 return ret;
12809 }
12810
12811 /* All pipes must be switched off while we change the cdclk. */
12812 if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
12813 &intel_state->cdclk.actual)) {
12814 ret = intel_modeset_all_pipes(state);
12815 if (ret < 0)
12816 return ret;
12817 }
12818
12819 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
12820 intel_state->cdclk.logical.cdclk,
12821 intel_state->cdclk.actual.cdclk);
12822 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
12823 intel_state->cdclk.logical.voltage_level,
12824 intel_state->cdclk.actual.voltage_level);
12825 } else {
12826 to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
12827 }
12828
12829 intel_modeset_clear_plls(state);
12830
12831 if (IS_HASWELL(dev_priv))
12832 return haswell_mode_set_planes_workaround(state);
12833
12834 return 0;
12835 }
12836
12837 /*
12838 * Handle calculation of various watermark data at the end of the atomic check
12839 * phase. The code here should be run after the per-crtc and per-plane 'check'
12840 * handlers to ensure that all derived state has been updated.
12841 */
12842 static int calc_watermark_data(struct intel_atomic_state *state)
12843 {
12844 struct drm_device *dev = state->base.dev;
12845 struct drm_i915_private *dev_priv = to_i915(dev);
12846
12847 /* Is there platform-specific watermark information to calculate? */
12848 if (dev_priv->display.compute_global_watermarks)
12849 return dev_priv->display.compute_global_watermarks(state);
12850
12851 return 0;
12852 }
12853
12854 /**
12855 * intel_atomic_check - validate state object
12856 * @dev: drm device
12857 * @state: state to validate
12858 */
12859 static int intel_atomic_check(struct drm_device *dev,
12860 struct drm_atomic_state *state)
12861 {
12862 struct drm_i915_private *dev_priv = to_i915(dev);
12863 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12864 struct drm_crtc *crtc;
12865 struct drm_crtc_state *old_crtc_state, *crtc_state;
12866 int ret, i;
12867 bool any_ms = false;
12868
12869 /* Catch I915_MODE_FLAG_INHERITED */
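/*
 * mode.private_flags carries I915_MODE_FLAG_INHERITED for the
 * configuration taken over from the BIOS; any difference from the old
 * state (e.g. that flag being cleared on the first userspace commit)
 * upgrades the update to a full modeset.
 */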
12870 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
12871 crtc_state, i) {
12872 if (crtc_state->mode.private_flags !=
12873 old_crtc_state->mode.private_flags)
12874 crtc_state->mode_changed = true;
12875 }
12876
12877 ret = drm_atomic_helper_check_modeset(dev, state);
12878 if (ret)
12879 return ret;
12880
12881 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
12882 struct intel_crtc_state *pipe_config =
12883 to_intel_crtc_state(crtc_state);
12884
12885 if (!needs_modeset(crtc_state))
12886 continue;
12887
12888 if (!crtc_state->enable) {
12889 any_ms = true;
12890 continue;
12891 }
12892
12893 ret = intel_modeset_pipe_config(crtc, pipe_config);
12894 if (ret == -EDEADLK)
12895 return ret;
12896 if (ret) {
12897 intel_dump_pipe_config(to_intel_crtc(crtc),
12898 pipe_config, "[failed]");
12899 return ret;
12900 }
12901
12902 if (intel_pipe_config_compare(dev_priv,
12903 to_intel_crtc_state(old_crtc_state),
12904 pipe_config, true)) {
12905 crtc_state->mode_changed = false;
12906 pipe_config->update_pipe = true;
12907 }
12908
12909 if (needs_modeset(crtc_state))
12910 any_ms = true;
12911
12912 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12913 needs_modeset(crtc_state) ?
12914 "[modeset]" : "[fastset]");
12915 }
12916
12917 ret = drm_dp_mst_atomic_check(state);
12918 if (ret)
12919 return ret;
12920
12921 if (any_ms) {
12922 ret = intel_modeset_checks(state);
12923
12924 if (ret)
12925 return ret;
12926 } else {
12927 intel_state->cdclk.logical = dev_priv->cdclk.logical;
12928 }
12929
12930 ret = icl_add_linked_planes(intel_state);
12931 if (ret)
12932 return ret;
12933
12934 ret = drm_atomic_helper_check_planes(dev, state);
12935 if (ret)
12936 return ret;
12937
12938 intel_fbc_choose_crtc(dev_priv, intel_state);
12939 return calc_watermark_data(intel_state);
12940 }
12941
12942 static int intel_atomic_prepare_commit(struct drm_device *dev,
12943 struct drm_atomic_state *state)
12944 {
12945 return drm_atomic_helper_prepare_planes(dev, state);
12946 }
12947
12948 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12949 {
12950 struct drm_device *dev = crtc->base.dev;
12951 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
12952
12953 if (!vblank->max_vblank_count)
12954 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12955
12956 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12957 }
12958
12959 static void intel_update_crtc(struct drm_crtc *crtc,
12960 struct drm_atomic_state *state,
12961 struct drm_crtc_state *old_crtc_state,
12962 struct drm_crtc_state *new_crtc_state)
12963 {
12964 struct drm_device *dev = crtc->dev;
12965 struct drm_i915_private *dev_priv = to_i915(dev);
12966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12967 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
12968 bool modeset = needs_modeset(new_crtc_state);
12969 struct intel_plane_state *new_plane_state =
12970 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
12971 to_intel_plane(crtc->primary));
12972
12973 if (modeset) {
12974 update_scanline_offset(pipe_config);
12975 dev_priv->display.crtc_enable(pipe_config, state);
12976
12977 /* vblanks work again, re-enable pipe CRC. */
12978 intel_crtc_enable_pipe_crc(intel_crtc);
12979 } else {
12980 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
12981 pipe_config);
12982
12983 if (pipe_config->update_pipe)
12984 intel_encoders_update_pipe(crtc, pipe_config, state);
12985 }
12986
12987 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
12988 intel_fbc_disable(intel_crtc);
12989 else if (new_plane_state)
12990 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
12991
12992 intel_begin_crtc_commit(crtc, old_crtc_state);
12993
12994 if (INTEL_GEN(dev_priv) >= 9)
12995 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
12996 else
12997 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
12998
12999 intel_finish_crtc_commit(crtc, old_crtc_state);
13000 }
13001
13002 static void intel_update_crtcs(struct drm_atomic_state *state)
13003 {
13004 struct drm_crtc *crtc;
13005 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13006 int i;
13007
13008 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13009 if (!new_crtc_state->active)
13010 continue;
13011
13012 intel_update_crtc(crtc, state, old_crtc_state,
13013 new_crtc_state);
13014 }
13015 }
13016
13017 static void skl_update_crtcs(struct drm_atomic_state *state)
13018 {
13019 struct drm_i915_private *dev_priv = to_i915(state->dev);
13020 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13021 struct drm_crtc *crtc;
13022 struct intel_crtc *intel_crtc;
13023 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13024 struct intel_crtc_state *cstate;
13025 unsigned int updated = 0;
13026 bool progress;
13027 enum pipe pipe;
13028 int i;
13029 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13030 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13031 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13032
13033 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13034 /* ignore allocations for crtc's that have been turned off. */
13035 if (new_crtc_state->active)
13036 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13037
13038 /* If 2nd DBuf slice required, enable it here */
13039 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13040 icl_dbuf_slices_update(dev_priv, required_slices);
13041
13042 /*
13043 * Whenever the number of active pipes changes, we need to make sure we
13044 * update the pipes in the right order so that their ddb allocations
13045 * never overlap with each other in between CRTC updates. Otherwise we'll
13046 * cause pipe underruns and other bad stuff.
13047 */
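/*
 * Concretely (illustrative): if pipe A is growing into DDB space that
 * pipe B is shrinking out of, pipe B must be updated first, and a
 * vblank must pass so its old allocation is truly released, before
 * pipe A may be programmed to use that space.
 */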
13048 do {
13049 progress = false;
13050
13051 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13052 bool vbl_wait = false;
13053 unsigned int cmask = drm_crtc_mask(crtc);
13054
13055 intel_crtc = to_intel_crtc(crtc);
13056 cstate = to_intel_crtc_state(new_crtc_state);
13057 pipe = intel_crtc->pipe;
13058
13059 if (updated & cmask || !cstate->base.active)
13060 continue;
13061
13062 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13063 entries,
13064 INTEL_INFO(dev_priv)->num_pipes, i))
13065 continue;
13066
13067 updated |= cmask;
13068 entries[i] = cstate->wm.skl.ddb;
13069
13070 /*
13071 * If this is an already active pipe, its DDB has changed,
13072 * and this isn't the last pipe that needs updating,
13073 * then we need to wait for a vblank to pass for the
13074 * new ddb allocation to take effect.
13075 */
13076 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13077 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13078 !new_crtc_state->active_changed &&
13079 intel_state->wm_results.dirty_pipes != updated)
13080 vbl_wait = true;
13081
13082 intel_update_crtc(crtc, state, old_crtc_state,
13083 new_crtc_state);
13084
13085 if (vbl_wait)
13086 intel_wait_for_vblank(dev_priv, pipe);
13087
13088 progress = true;
13089 }
13090 } while (progress);
13091
13092 /* If the 2nd DBuf slice is no longer required, disable it */
13093 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13094 icl_dbuf_slices_update(dev_priv, required_slices);
13095 }
13096
13097 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13098 {
13099 struct intel_atomic_state *state, *next;
13100 struct llist_node *freed;
13101
13102 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13103 llist_for_each_entry_safe(state, next, freed, freed)
13104 drm_atomic_state_put(&state->base);
13105 }
13106
13107 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13108 {
13109 struct drm_i915_private *dev_priv =
13110 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13111
13112 intel_atomic_helper_free_state(dev_priv);
13113 }
13114
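/*
 * Sleep until either the commit_ready fence signals or a GPU reset that
 * needs a modeset is flagged, whichever happens first, so that a wedged
 * GPU cannot stall the commit indefinitely.
 */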
13115 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13116 {
13117 struct wait_queue_entry wait_fence, wait_reset;
13118 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13119
13120 init_wait_entry(&wait_fence, 0);
13121 init_wait_entry(&wait_reset, 0);
13122 for (;;) {
13123 prepare_to_wait(&intel_state->commit_ready.wait,
13124 &wait_fence, TASK_UNINTERRUPTIBLE);
13125 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13126 &wait_reset, TASK_UNINTERRUPTIBLE);
13127
13129 if (i915_sw_fence_done(&intel_state->commit_ready)
13130 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13131 break;
13132
13133 schedule();
13134 }
13135 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13136 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13137 }
13138
13139 static void intel_atomic_cleanup_work(struct work_struct *work)
13140 {
13141 struct drm_atomic_state *state =
13142 container_of(work, struct drm_atomic_state, commit_work);
13143 struct drm_i915_private *i915 = to_i915(state->dev);
13144
13145 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13146 drm_atomic_helper_commit_cleanup_done(state);
13147 drm_atomic_state_put(state);
13148
13149 intel_atomic_helper_free_state(i915);
13150 }
13151
13152 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13153 {
13154 struct drm_device *dev = state->dev;
13155 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13156 struct drm_i915_private *dev_priv = to_i915(dev);
13157 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13158 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13159 struct drm_crtc *crtc;
13160 struct intel_crtc *intel_crtc;
13161 u64 put_domains[I915_MAX_PIPES] = {};
13162 intel_wakeref_t wakeref = 0;
13163 int i;
13164
13165 intel_atomic_commit_fence_wait(intel_state);
13166
13167 drm_atomic_helper_wait_for_dependencies(state);
13168
13169 if (intel_state->modeset)
13170 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13171
13172 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13173 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13174 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13175 intel_crtc = to_intel_crtc(crtc);
13176
13177 if (needs_modeset(new_crtc_state) ||
13178 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13179
13180 put_domains[intel_crtc->pipe] =
13181 modeset_get_crtc_power_domains(crtc,
13182 new_intel_crtc_state);
13183 }
13184
13185 if (!needs_modeset(new_crtc_state))
13186 continue;
13187
13188 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13189
13190 if (old_crtc_state->active) {
13191 intel_crtc_disable_planes(intel_state, intel_crtc);
13192
13193 /*
13194 * We need to disable pipe CRC before disabling the pipe,
13195 * or we race against vblank off.
13196 */
13197 intel_crtc_disable_pipe_crc(intel_crtc);
13198
13199 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13200 intel_crtc->active = false;
13201 intel_fbc_disable(intel_crtc);
13202 intel_disable_shared_dpll(old_intel_crtc_state);
13203
13204 /*
13205 * Underruns don't always raise
13206 * interrupts, so check manually.
13207 */
13208 intel_check_cpu_fifo_underruns(dev_priv);
13209 intel_check_pch_fifo_underruns(dev_priv);
13210
13211 /* FIXME unify this for all platforms */
13212 if (!new_crtc_state->active &&
13213 !HAS_GMCH(dev_priv) &&
13214 dev_priv->display.initial_watermarks)
13215 dev_priv->display.initial_watermarks(intel_state,
13216 new_intel_crtc_state);
13217 }
13218 }
13219
13220 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13221 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13222 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13223
13224 if (intel_state->modeset) {
13225 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13226
13227 intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
13228
13229 /*
13230 * SKL workaround: bspec recommends we disable the SAGV when we
13231 * have more than one pipe enabled
13232 */
13233 if (!intel_can_enable_sagv(state))
13234 intel_disable_sagv(dev_priv);
13235
13236 intel_modeset_verify_disabled(dev, state);
13237 }
13238
13239 /* Complete the events for pipes that have now been disabled */
13240 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13241 bool modeset = needs_modeset(new_crtc_state);
13242
13243 /* Complete events for now-disabled pipes here. */
13244 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13245 spin_lock_irq(&dev->event_lock);
13246 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13247 spin_unlock_irq(&dev->event_lock);
13248
13249 new_crtc_state->event = NULL;
13250 }
13251 }
13252
13253 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13254 dev_priv->display.update_crtcs(state);
13255
13256 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13257 * already, but still need the state for the delayed optimization. To
13258 * fix this:
13259 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13260 * - schedule that vblank worker _before_ calling hw_done
13261 * - at the start of commit_tail, cancel it _synchronously_
13262 * - switch over to the vblank wait helper in the core after that since
13263 * we don't need out special handling any more.
13264 */
13265 drm_atomic_helper_wait_for_flip_done(dev, state);
13266
13267 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13268 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13269
13270 if (new_crtc_state->active &&
13271 !needs_modeset(new_crtc_state) &&
13272 (new_intel_crtc_state->base.color_mgmt_changed ||
13273 new_intel_crtc_state->update_pipe))
13274 intel_color_load_luts(new_intel_crtc_state);
13275 }
13276
13277 /*
13278 * Now that the vblank has passed, we can go ahead and program the
13279 * optimal watermarks on platforms that need two-step watermark
13280 * programming.
13281 *
13282 * TODO: Move this (and other cleanup) to an async worker eventually.
13283 */
13284 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13285 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13286
13287 if (dev_priv->display.optimize_watermarks)
13288 dev_priv->display.optimize_watermarks(intel_state,
13289 new_intel_crtc_state);
13290 }
13291
13292 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13293 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13294
13295 if (put_domains[i])
13296 modeset_put_power_domains(dev_priv, put_domains[i]);
13297
13298 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13299 }
13300
13301 if (intel_state->modeset)
13302 intel_verify_planes(intel_state);
13303
13304 if (intel_state->modeset && intel_can_enable_sagv(state))
13305 intel_enable_sagv(dev_priv);
13306
13307 drm_atomic_helper_commit_hw_done(state);
13308
13309 if (intel_state->modeset) {
13310 /* As one of the primary mmio accessors, KMS has a high
13311 * likelihood of triggering bugs in unclaimed access. After we
13312 * finish modesetting, see if an error has been flagged, and if
13313 * so enable debugging for the next modeset - and hope we catch
13314 * the culprit.
13315 */
13316 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13317 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13318 }
13319
13320 /*
13321 * Defer the cleanup of the old state to a separate worker to not
13322 * impede the current task (userspace for blocking modesets) that
13323 * is executed inline. For out-of-line asynchronous modesets/flips,
13324 * deferring to a new worker seems overkill, but we would place a
13325 * schedule point (cond_resched()) here anyway to keep latencies
13326 * down.
13327 */
13328 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13329 queue_work(system_highpri_wq, &state->commit_work);
13330 }
13331
13332 static void intel_atomic_commit_work(struct work_struct *work)
13333 {
13334 struct drm_atomic_state *state =
13335 container_of(work, struct drm_atomic_state, commit_work);
13336
13337 intel_atomic_commit_tail(state);
13338 }
13339
13340 static int __i915_sw_fence_call
13341 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13342 enum i915_sw_fence_notify notify)
13343 {
13344 struct intel_atomic_state *state =
13345 container_of(fence, struct intel_atomic_state, commit_ready);
13346
13347 switch (notify) {
13348 case FENCE_COMPLETE:
13349 /* we do blocking waits in the worker, nothing to do here */
13350 break;
13351 case FENCE_FREE:
13352 {
13353 struct intel_atomic_helper *helper =
13354 &to_i915(state->base.dev)->atomic_helper;
13355
13356 if (llist_add(&state->freed, &helper->free_list))
13357 schedule_work(&helper->free_work);
13358 break;
13359 }
13360 }
13361
13362 return NOTIFY_DONE;
13363 }
13364
13365 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13366 {
13367 struct drm_plane_state *old_plane_state, *new_plane_state;
13368 struct drm_plane *plane;
13369 int i;
13370
13371 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13372 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13373 intel_fb_obj(new_plane_state->fb),
13374 to_intel_plane(plane)->frontbuffer_bit);
13375 }
13376
13377 /**
13378 * intel_atomic_commit - commit validated state object
13379 * @dev: DRM device
13380 * @state: the top-level driver state object
13381 * @nonblock: nonblocking commit
13382 *
13383 * This function commits a top-level state object that has been validated
13384 * with drm_atomic_helper_check().
13385 *
13386 * RETURNS
13387 * Zero for success or -errno.
13388 */
13389 static int intel_atomic_commit(struct drm_device *dev,
13390 struct drm_atomic_state *state,
13391 bool nonblock)
13392 {
13393 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13394 struct drm_i915_private *dev_priv = to_i915(dev);
13395 int ret = 0;
13396
13397 drm_atomic_state_get(state);
13398 i915_sw_fence_init(&intel_state->commit_ready,
13399 intel_atomic_commit_ready);
13400
13401 /*
13402 * The intel_legacy_cursor_update() fast path takes care
13403 * of avoiding the vblank waits for simple cursor
13404 * movement and flips. For cursor on/off and size changes,
13405 * we want to perform the vblank waits so that watermark
13406 * updates happen during the correct frames. Gen9+ have
13407 * double buffered watermarks and so shouldn't need this.
13408 *
13409 * Unset state->legacy_cursor_update before the call to
13410 * drm_atomic_helper_setup_commit() because otherwise
13411 * drm_atomic_helper_wait_for_flip_done() is a noop and
13412 * we get FIFO underruns because we didn't wait
13413 * for vblank.
13414 *
13415 * FIXME doing watermarks and fb cleanup from a vblank worker
13416 * (assuming we had any) would solve these problems.
13417 */
13418 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13419 struct intel_crtc_state *new_crtc_state;
13420 struct intel_crtc *crtc;
13421 int i;
13422
13423 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13424 if (new_crtc_state->wm.need_postvbl_update ||
13425 new_crtc_state->update_wm_post)
13426 state->legacy_cursor_update = false;
13427 }
13428
13429 ret = intel_atomic_prepare_commit(dev, state);
13430 if (ret) {
13431 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13432 i915_sw_fence_commit(&intel_state->commit_ready);
13433 return ret;
13434 }
13435
13436 ret = drm_atomic_helper_setup_commit(state, nonblock);
13437 if (!ret)
13438 ret = drm_atomic_helper_swap_state(state, true);
13439
13440 if (ret) {
13441 i915_sw_fence_commit(&intel_state->commit_ready);
13442
13443 drm_atomic_helper_cleanup_planes(dev, state);
13444 return ret;
13445 }
13446 dev_priv->wm.distrust_bios_wm = false;
13447 intel_shared_dpll_swap_state(state);
13448 intel_atomic_track_fbs(state);
13449
13450 if (intel_state->modeset) {
13451 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13452 sizeof(intel_state->min_cdclk));
13453 memcpy(dev_priv->min_voltage_level,
13454 intel_state->min_voltage_level,
13455 sizeof(intel_state->min_voltage_level));
13456 dev_priv->active_crtcs = intel_state->active_crtcs;
13457 dev_priv->cdclk.logical = intel_state->cdclk.logical;
13458 dev_priv->cdclk.actual = intel_state->cdclk.actual;
13459 }
13460
13461 drm_atomic_state_get(state);
13462 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13463
13464 i915_sw_fence_commit(&intel_state->commit_ready);
13465 if (nonblock && intel_state->modeset) {
13466 queue_work(dev_priv->modeset_wq, &state->commit_work);
13467 } else if (nonblock) {
13468 queue_work(system_unbound_wq, &state->commit_work);
13469 } else {
13470 if (intel_state->modeset)
13471 flush_workqueue(dev_priv->modeset_wq);
13472 intel_atomic_commit_tail(state);
13473 }
13474
13475 return 0;
13476 }
13477
13478 static const struct drm_crtc_funcs intel_crtc_funcs = {
13479 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13480 .set_config = drm_atomic_helper_set_config,
13481 .destroy = intel_crtc_destroy,
13482 .page_flip = drm_atomic_helper_page_flip,
13483 .atomic_duplicate_state = intel_crtc_duplicate_state,
13484 .atomic_destroy_state = intel_crtc_destroy_state,
13485 .set_crc_source = intel_crtc_set_crc_source,
13486 .verify_crc_source = intel_crtc_verify_crc_source,
13487 .get_crc_sources = intel_crtc_get_crc_sources,
13488 };
13489
13490 struct wait_rps_boost {
13491 struct wait_queue_entry wait;
13492
13493 struct drm_crtc *crtc;
13494 struct i915_request *request;
13495 };
13496
13497 static int do_rps_boost(struct wait_queue_entry *_wait,
13498 unsigned mode, int sync, void *key)
13499 {
13500 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
13501 struct i915_request *rq = wait->request;
13502
13503 /*
13504 * If we missed the vblank, but the request is already running, it
13505 * is reasonable to assume that it will complete before the next
13506 * vblank without our intervention, so leave RPS alone.
13507 */
13508 if (!i915_request_started(rq))
13509 gen6_rps_boost(rq, NULL);
13510 i915_request_put(rq);
13511
13512 drm_crtc_vblank_put(wait->crtc);
13513
13514 list_del(&wait->wait.entry);
13515 kfree(wait);
13516 return 1;
13517 }
13518
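/*
 * Arm a one-shot boost on the crtc's vblank waitqueue: if the flip's
 * backing request still hasn't started executing by the time the next
 * vblank fires, do_rps_boost() above bumps the GPU clocks.
 */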
13519 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13520 struct dma_fence *fence)
13521 {
13522 struct wait_rps_boost *wait;
13523
13524 if (!dma_fence_is_i915(fence))
13525 return;
13526
13527 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13528 return;
13529
13530 if (drm_crtc_vblank_get(crtc))
13531 return;
13532
13533 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13534 if (!wait) {
13535 drm_crtc_vblank_put(crtc);
13536 return;
13537 }
13538
13539 wait->request = to_request(dma_fence_get(fence));
13540 wait->crtc = crtc;
13541
13542 wait->wait.func = do_rps_boost;
13543 wait->wait.flags = 0;
13544
13545 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13546 }
13547
13548 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13549 {
13550 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13551 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13552 struct drm_framebuffer *fb = plane_state->base.fb;
13553 struct i915_vma *vma;
13554
13555 if (plane->id == PLANE_CURSOR &&
13556 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
13557 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13558 const int align = intel_cursor_alignment(dev_priv);
13559 int err;
13560
13561 err = i915_gem_object_attach_phys(obj, align);
13562 if (err)
13563 return err;
13564 }
13565
13566 vma = intel_pin_and_fence_fb_obj(fb,
13567 &plane_state->view,
13568 intel_plane_uses_fence(plane_state),
13569 &plane_state->flags);
13570 if (IS_ERR(vma))
13571 return PTR_ERR(vma);
13572
13573 plane_state->vma = vma;
13574
13575 return 0;
13576 }
13577
13578 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13579 {
13580 struct i915_vma *vma;
13581
13582 vma = fetch_and_zero(&old_plane_state->vma);
13583 if (vma)
13584 intel_unpin_fb_vma(vma, old_plane_state->flags);
13585 }
13586
13587 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
13588 {
13589 struct i915_sched_attr attr = {
13590 .priority = I915_PRIORITY_DISPLAY,
13591 };
13592
13593 i915_gem_object_wait_priority(obj, 0, &attr);
13594 }
13595
13596 /**
13597 * intel_prepare_plane_fb - Prepare fb for usage on plane
13598 * @plane: drm plane to prepare for
13599 * @new_state: the plane state being prepared
13600 *
13601 * Prepares a framebuffer for usage on a display plane. Generally this
13602 * involves pinning the underlying object and updating the frontbuffer tracking
13603 * bits. Some older platforms need special physical address handling for
13604 * cursor planes.
13605 *
13606 * Must be called with struct_mutex held.
13607 *
13608 * Returns 0 on success, negative error code on failure.
13609 */
13610 int
13611 intel_prepare_plane_fb(struct drm_plane *plane,
13612 struct drm_plane_state *new_state)
13613 {
13614 struct intel_atomic_state *intel_state =
13615 to_intel_atomic_state(new_state->state);
13616 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13617 struct drm_framebuffer *fb = new_state->fb;
13618 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13619 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13620 int ret;
13621
13622 if (old_obj) {
13623 struct drm_crtc_state *crtc_state =
13624 drm_atomic_get_new_crtc_state(new_state->state,
13625 plane->state->crtc);
13626
13627 /* Big Hammer, we also need to ensure that any pending
13628 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13629 * current scanout is retired before unpinning the old
13630 * framebuffer. Note that we rely on userspace rendering
13631 * into the buffer attached to the pipe they are waiting
13632 * on. If not, userspace generates a GPU hang with IPEHR
13633 * pointing to the MI_WAIT_FOR_EVENT.
13634 *
13635 * This should only fail upon a hung GPU, in which case we
13636 * can safely continue.
13637 */
13638 if (needs_modeset(crtc_state)) {
13639 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13640 old_obj->resv, NULL,
13641 false, 0,
13642 GFP_KERNEL);
13643 if (ret < 0)
13644 return ret;
13645 }
13646 }
13647
13648 if (new_state->fence) { /* explicit fencing */
13649 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13650 new_state->fence,
13651 I915_FENCE_TIMEOUT,
13652 GFP_KERNEL);
13653 if (ret < 0)
13654 return ret;
13655 }
13656
13657 if (!obj)
13658 return 0;
13659
13660 ret = i915_gem_object_pin_pages(obj);
13661 if (ret)
13662 return ret;
13663
13664 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13665 if (ret) {
13666 i915_gem_object_unpin_pages(obj);
13667 return ret;
13668 }
13669
13670 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13671
13672 mutex_unlock(&dev_priv->drm.struct_mutex);
13673 i915_gem_object_unpin_pages(obj);
13674 if (ret)
13675 return ret;
13676
13677 fb_obj_bump_render_priority(obj);
13678 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13679
13680 if (!new_state->fence) { /* implicit fencing */
13681 struct dma_fence *fence;
13682
13683 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13684 obj->resv, NULL,
13685 false, I915_FENCE_TIMEOUT,
13686 GFP_KERNEL);
13687 if (ret < 0)
13688 return ret;
13689
13690 fence = reservation_object_get_excl_rcu(obj->resv);
13691 if (fence) {
13692 add_rps_boost_after_vblank(new_state->crtc, fence);
13693 dma_fence_put(fence);
13694 }
13695 } else {
13696 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
13697 }
13698
13699 /*
13700 * We declare pageflips to be interactive and so merit a small bias
13701 * towards upclocking to deliver the frame on time. By only changing
13702 * the RPS thresholds to sample more regularly and aim for higher
13703 * clocks we can hopefully deliver low power workloads (like kodi)
13704 * that are not quite steady state without resorting to forcing
13705 * maximum clocks following a vblank miss (see do_rps_boost()).
13706 */
13707 if (!intel_state->rps_interactive) {
13708 intel_rps_mark_interactive(dev_priv, true);
13709 intel_state->rps_interactive = true;
13710 }
13711
13712 return 0;
13713 }
13714
13715 /**
13716 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13717 * @plane: drm plane to clean up for
13718 * @old_state: the state from the previous modeset
13719 *
13720 * Cleans up a framebuffer that has just been removed from a plane.
13721 *
13722 * Must be called with struct_mutex held.
13723 */
13724 void
13725 intel_cleanup_plane_fb(struct drm_plane *plane,
13726 struct drm_plane_state *old_state)
13727 {
13728 struct intel_atomic_state *intel_state =
13729 to_intel_atomic_state(old_state->state);
13730 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13731
13732 if (intel_state->rps_interactive) {
13733 intel_rps_mark_interactive(dev_priv, false);
13734 intel_state->rps_interactive = false;
13735 }
13736
13737 /* Should only be called after a successful intel_prepare_plane_fb()! */
13738 mutex_lock(&dev_priv->drm.struct_mutex);
13739 intel_plane_unpin_fb(to_intel_plane_state(old_state));
13740 mutex_unlock(&dev_priv->drm.struct_mutex);
13741 }
13742
13743 int
13744 skl_max_scale(const struct intel_crtc_state *crtc_state,
13745 u32 pixel_format)
13746 {
13747 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13748 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13749 int max_scale, mult;
13750 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13751
13752 if (!crtc_state->base.enable)
13753 return DRM_PLANE_HELPER_NO_SCALING;
13754
13755 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13756 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13757
13758 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13759 max_dotclk *= 2;
13760
13761 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13762 return DRM_PLANE_HELPER_NO_SCALING;
13763
13764 /*
13765 * skl max scale is the lower of:
13766 * just under the max factor (the -1 keeps it strictly below 3, or 2 for NV12)
13767 * or
13768 * cdclk/crtc_clock
13769 */
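/*
 * Illustrative numbers: crtc_clock = 148500 and max_dotclk = 308571 give
 * tmpclk2 = 256 * ((308571 << 8) / 148500) = 135936, i.e. ~2.07 in .16
 * fixed point, which then wins over the ~3.0 cap (tmpclk1 = 196607).
 */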
13770 mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13771 tmpclk1 = (1 << 16) * mult - 1;
13772 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13773 max_scale = min(tmpclk1, tmpclk2);
13774
13775 return max_scale;
13776 }
13777
13778 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13779 struct drm_crtc_state *old_crtc_state)
13780 {
13781 struct drm_device *dev = crtc->dev;
13782 struct drm_i915_private *dev_priv = to_i915(dev);
13783 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13784 struct intel_crtc_state *old_intel_cstate =
13785 to_intel_crtc_state(old_crtc_state);
13786 struct intel_atomic_state *old_intel_state =
13787 to_intel_atomic_state(old_crtc_state->state);
13788 struct intel_crtc_state *intel_cstate =
13789 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13790 bool modeset = needs_modeset(&intel_cstate->base);
13791
13792 /* Perform vblank evasion around commit operation */
13793 intel_pipe_update_start(intel_cstate);
13794
13795 if (modeset)
13796 goto out;
13797
13798 if (intel_cstate->base.color_mgmt_changed ||
13799 intel_cstate->update_pipe)
13800 intel_color_commit(intel_cstate);
13801
13802 if (intel_cstate->update_pipe)
13803 intel_update_pipe_config(old_intel_cstate, intel_cstate);
13804 else if (INTEL_GEN(dev_priv) >= 9)
13805 skl_detach_scalers(intel_cstate);
13806
13807 out:
13808 if (dev_priv->display.atomic_update_watermarks)
13809 dev_priv->display.atomic_update_watermarks(old_intel_state,
13810 intel_cstate);
13811 }
13812
13813 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13814 struct intel_crtc_state *crtc_state)
13815 {
13816 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13817
13818 if (!IS_GEN(dev_priv, 2))
13819 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13820
13821 if (crtc_state->has_pch_encoder) {
13822 enum pipe pch_transcoder =
13823 intel_crtc_pch_transcoder(crtc);
13824
13825 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13826 }
13827 }
13828
13829 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13830 struct drm_crtc_state *old_crtc_state)
13831 {
13832 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13833 struct intel_atomic_state *old_intel_state =
13834 to_intel_atomic_state(old_crtc_state->state);
13835 struct intel_crtc_state *new_crtc_state =
13836 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13837
13838 intel_pipe_update_end(new_crtc_state);
13839
13840 if (new_crtc_state->update_pipe &&
13841 !needs_modeset(&new_crtc_state->base) &&
13842 old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13843 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
13844 }
13845
13846 /**
13847 * intel_plane_destroy - destroy a plane
13848 * @plane: plane to destroy
13849 *
13850 * Common destruction function for all types of planes (primary, cursor,
13851 * sprite).
13852 */
13853 void intel_plane_destroy(struct drm_plane *plane)
13854 {
13855 drm_plane_cleanup(plane);
13856 kfree(to_intel_plane(plane));
13857 }
13858
13859 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13860 u32 format, u64 modifier)
13861 {
13862 switch (modifier) {
13863 case DRM_FORMAT_MOD_LINEAR:
13864 case I915_FORMAT_MOD_X_TILED:
13865 break;
13866 default:
13867 return false;
13868 }
13869
13870 switch (format) {
13871 case DRM_FORMAT_C8:
13872 case DRM_FORMAT_RGB565:
13873 case DRM_FORMAT_XRGB1555:
13874 case DRM_FORMAT_XRGB8888:
13875 return modifier == DRM_FORMAT_MOD_LINEAR ||
13876 modifier == I915_FORMAT_MOD_X_TILED;
13877 default:
13878 return false;
13879 }
13880 }
13881
13882 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13883 u32 format, u64 modifier)
13884 {
13885 switch (modifier) {
13886 case DRM_FORMAT_MOD_LINEAR:
13887 case I915_FORMAT_MOD_X_TILED:
13888 break;
13889 default:
13890 return false;
13891 }
13892
13893 switch (format) {
13894 case DRM_FORMAT_C8:
13895 case DRM_FORMAT_RGB565:
13896 case DRM_FORMAT_XRGB8888:
13897 case DRM_FORMAT_XBGR8888:
13898 case DRM_FORMAT_XRGB2101010:
13899 case DRM_FORMAT_XBGR2101010:
13900 return modifier == DRM_FORMAT_MOD_LINEAR ||
13901 modifier == I915_FORMAT_MOD_X_TILED;
13902 default:
13903 return false;
13904 }
13905 }
13906
13907 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13908 u32 format, u64 modifier)
13909 {
13910 return modifier == DRM_FORMAT_MOD_LINEAR &&
13911 format == DRM_FORMAT_ARGB8888;
13912 }
13913
13914 static const struct drm_plane_funcs i965_plane_funcs = {
13915 .update_plane = drm_atomic_helper_update_plane,
13916 .disable_plane = drm_atomic_helper_disable_plane,
13917 .destroy = intel_plane_destroy,
13918 .atomic_get_property = intel_plane_atomic_get_property,
13919 .atomic_set_property = intel_plane_atomic_set_property,
13920 .atomic_duplicate_state = intel_plane_duplicate_state,
13921 .atomic_destroy_state = intel_plane_destroy_state,
13922 .format_mod_supported = i965_plane_format_mod_supported,
13923 };
13924
13925 static const struct drm_plane_funcs i8xx_plane_funcs = {
13926 .update_plane = drm_atomic_helper_update_plane,
13927 .disable_plane = drm_atomic_helper_disable_plane,
13928 .destroy = intel_plane_destroy,
13929 .atomic_get_property = intel_plane_atomic_get_property,
13930 .atomic_set_property = intel_plane_atomic_set_property,
13931 .atomic_duplicate_state = intel_plane_duplicate_state,
13932 .atomic_destroy_state = intel_plane_destroy_state,
13933 .format_mod_supported = i8xx_plane_format_mod_supported,
13934 };
13935
13936 static int
13937 intel_legacy_cursor_update(struct drm_plane *plane,
13938 struct drm_crtc *crtc,
13939 struct drm_framebuffer *fb,
13940 int crtc_x, int crtc_y,
13941 unsigned int crtc_w, unsigned int crtc_h,
13942 u32 src_x, u32 src_y,
13943 u32 src_w, u32 src_h,
13944 struct drm_modeset_acquire_ctx *ctx)
13945 {
13946 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
13947 int ret;
13948 struct drm_plane_state *old_plane_state, *new_plane_state;
13949 struct intel_plane *intel_plane = to_intel_plane(plane);
13950 struct drm_framebuffer *old_fb;
13951 struct intel_crtc_state *crtc_state =
13952 to_intel_crtc_state(crtc->state);
13953 struct intel_crtc_state *new_crtc_state;
13954
13955 /*
13956 * When crtc is inactive or there is a modeset pending,
13957 * wait for it to complete in the slowpath
13958 */
13959 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
13960 crtc_state->update_pipe)
13961 goto slow;
13962
13963 old_plane_state = plane->state;
13964 /*
13965 * Don't do an async update if there is an outstanding commit modifying
13966 * the plane. This prevents our async update's changes from getting
13967 * overridden by a previous synchronous update's state.
13968 */
13969 if (old_plane_state->commit &&
13970 !try_wait_for_completion(&old_plane_state->commit->hw_done))
13971 goto slow;
13972
13973 /*
13974 * If any parameters change that may affect watermarks,
13975 * take the slowpath. Only changing fb or position should be
13976 * in the fastpath.
13977 */
13978 if (old_plane_state->crtc != crtc ||
13979 old_plane_state->src_w != src_w ||
13980 old_plane_state->src_h != src_h ||
13981 old_plane_state->crtc_w != crtc_w ||
13982 old_plane_state->crtc_h != crtc_h ||
13983 !old_plane_state->fb != !fb)
13984 goto slow;
13985
13986 new_plane_state = intel_plane_duplicate_state(plane);
13987 if (!new_plane_state)
13988 return -ENOMEM;
13989
13990 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
13991 if (!new_crtc_state) {
13992 ret = -ENOMEM;
13993 goto out_free;
13994 }
13995
13996 drm_atomic_set_fb_for_plane(new_plane_state, fb);
13997
13998 new_plane_state->src_x = src_x;
13999 new_plane_state->src_y = src_y;
14000 new_plane_state->src_w = src_w;
14001 new_plane_state->src_h = src_h;
14002 new_plane_state->crtc_x = crtc_x;
14003 new_plane_state->crtc_y = crtc_y;
14004 new_plane_state->crtc_w = crtc_w;
14005 new_plane_state->crtc_h = crtc_h;
14006
14007 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14008 to_intel_plane_state(old_plane_state),
14009 to_intel_plane_state(new_plane_state));
14010 if (ret)
14011 goto out_free;
14012
14013 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14014 if (ret)
14015 goto out_free;
14016
14017 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14018 if (ret)
14019 goto out_unlock;
14020
14021 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14022
14023 old_fb = old_plane_state->fb;
14024 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14025 intel_plane->frontbuffer_bit);
14026
14027 /* Swap plane state */
14028 plane->state = new_plane_state;
14029
14030 /*
14031 * We cannot swap crtc_state as it may be in use by an atomic commit or
14032 * page flip that's running simultaneously. If we swap crtc_state and
14033 * destroy the old state, we will cause a use-after-free there.
14034 *
14035 * Only update active_planes, which is needed for our internal
14036 * bookkeeping. Either value will do the right thing when updating
14037 * planes atomically. If the cursor was part of the atomic update then
14038 * we would have taken the slowpath.
14039 */
14040 crtc_state->active_planes = new_crtc_state->active_planes;
14041
14042 if (plane->state->visible) {
14043 trace_intel_update_plane(plane, to_intel_crtc(crtc));
14044 intel_plane->update_plane(intel_plane, crtc_state,
14045 to_intel_plane_state(plane->state));
14046 } else {
14047 trace_intel_disable_plane(plane, to_intel_crtc(crtc));
14048 intel_plane->disable_plane(intel_plane, crtc_state);
14049 }
14050
14051 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14052
14053 out_unlock:
14054 mutex_unlock(&dev_priv->drm.struct_mutex);
14055 out_free:
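/*
 * On failure the duplicated plane state was never swapped in and is
 * freed; on success the old plane state has been swapped out and is
 * freed instead. The duplicated crtc state was only needed for the
 * atomic check and is always freed here.
 */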
14056 if (new_crtc_state)
14057 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14058 if (ret)
14059 intel_plane_destroy_state(plane, new_plane_state);
14060 else
14061 intel_plane_destroy_state(plane, old_plane_state);
14062 return ret;
14063
14064 slow:
14065 return drm_atomic_helper_update_plane(plane, crtc, fb,
14066 crtc_x, crtc_y, crtc_w, crtc_h,
14067 src_x, src_y, src_w, src_h, ctx);
14068 }
14069
14070 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14071 .update_plane = intel_legacy_cursor_update,
14072 .disable_plane = drm_atomic_helper_disable_plane,
14073 .destroy = intel_plane_destroy,
14074 .atomic_get_property = intel_plane_atomic_get_property,
14075 .atomic_set_property = intel_plane_atomic_set_property,
14076 .atomic_duplicate_state = intel_plane_duplicate_state,
14077 .atomic_destroy_state = intel_plane_destroy_state,
14078 .format_mod_supported = intel_cursor_format_mod_supported,
14079 };
14080
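/*
 * FBC is tied to a fixed set of primary planes: plane A only on
 * HSW/BDW and on gen2/3, planes A-C on IVB, and planes A/B on
 * other gen4+ platforms.
 */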
14081 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14082 enum i9xx_plane_id i9xx_plane)
14083 {
14084 if (!HAS_FBC(dev_priv))
14085 return false;
14086
14087 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14088 return i9xx_plane == PLANE_A; /* tied to pipe A */
14089 else if (IS_IVYBRIDGE(dev_priv))
14090 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14091 i9xx_plane == PLANE_C;
14092 else if (INTEL_GEN(dev_priv) >= 4)
14093 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14094 else
14095 return i9xx_plane == PLANE_A;
14096 }
14097
14098 static struct intel_plane *
14099 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14100 {
14101 struct intel_plane *plane;
14102 const struct drm_plane_funcs *plane_funcs;
14103 unsigned int supported_rotations;
14104 unsigned int possible_crtcs;
14105 const u64 *modifiers;
14106 const u32 *formats;
14107 int num_formats;
14108 int ret;
14109
14110 if (INTEL_GEN(dev_priv) >= 9)
14111 return skl_universal_plane_create(dev_priv, pipe,
14112 PLANE_PRIMARY);
14113
14114 plane = intel_plane_alloc();
14115 if (IS_ERR(plane))
14116 return plane;
14117
14118 plane->pipe = pipe;
14119 /*
14120 * On gen2/3 only plane A can do FBC, but the panel fitter and the
14121 * LVDS port are hooked to pipe B. Hence we want plane A feeding pipe B.
14122 */
14123 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14124 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14125 else
14126 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14127 plane->id = PLANE_PRIMARY;
14128 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14129
14130 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14131 if (plane->has_fbc) {
14132 struct intel_fbc *fbc = &dev_priv->fbc;
14133
14134 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14135 }
14136
14137 if (INTEL_GEN(dev_priv) >= 4) {
14138 formats = i965_primary_formats;
14139 num_formats = ARRAY_SIZE(i965_primary_formats);
14140 modifiers = i9xx_format_modifiers;
14141
14142 plane->max_stride = i9xx_plane_max_stride;
14143 plane->update_plane = i9xx_update_plane;
14144 plane->disable_plane = i9xx_disable_plane;
14145 plane->get_hw_state = i9xx_plane_get_hw_state;
14146 plane->check_plane = i9xx_plane_check;
14147
14148 plane_funcs = &i965_plane_funcs;
14149 } else {
14150 formats = i8xx_primary_formats;
14151 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14152 modifiers = i9xx_format_modifiers;
14153
14154 plane->max_stride = i9xx_plane_max_stride;
14155 plane->update_plane = i9xx_update_plane;
14156 plane->disable_plane = i9xx_disable_plane;
14157 plane->get_hw_state = i9xx_plane_get_hw_state;
14158 plane->check_plane = i9xx_plane_check;
14159
14160 plane_funcs = &i8xx_plane_funcs;
14161 }
14162
14163 possible_crtcs = BIT(pipe);
14164
14165 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14166 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14167 possible_crtcs, plane_funcs,
14168 formats, num_formats, modifiers,
14169 DRM_PLANE_TYPE_PRIMARY,
14170 "primary %c", pipe_name(pipe));
14171 else
14172 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14173 possible_crtcs, plane_funcs,
14174 formats, num_formats, modifiers,
14175 DRM_PLANE_TYPE_PRIMARY,
14176 "plane %c",
14177 plane_name(plane->i9xx_plane));
14178 if (ret)
14179 goto fail;
14180
14181 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14182 supported_rotations =
14183 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14184 DRM_MODE_REFLECT_X;
14185 } else if (INTEL_GEN(dev_priv) >= 4) {
14186 supported_rotations =
14187 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14188 } else {
14189 supported_rotations = DRM_MODE_ROTATE_0;
14190 }
14191
14192 if (INTEL_GEN(dev_priv) >= 4)
14193 drm_plane_create_rotation_property(&plane->base,
14194 DRM_MODE_ROTATE_0,
14195 supported_rotations);
14196
14197 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14198
14199 return plane;
14200
14201 fail:
14202 intel_plane_free(plane);
14203
14204 return ERR_PTR(ret);
14205 }
14206
14207 static struct intel_plane *
14208 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14209 enum pipe pipe)
14210 {
14211 unsigned int possible_crtcs;
14212 struct intel_plane *cursor;
14213 int ret;
14214
14215 cursor = intel_plane_alloc();
14216 if (IS_ERR(cursor))
14217 return cursor;
14218
14219 cursor->pipe = pipe;
14220 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14221 cursor->id = PLANE_CURSOR;
14222 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14223
14224 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14225 cursor->max_stride = i845_cursor_max_stride;
14226 cursor->update_plane = i845_update_cursor;
14227 cursor->disable_plane = i845_disable_cursor;
14228 cursor->get_hw_state = i845_cursor_get_hw_state;
14229 cursor->check_plane = i845_check_cursor;
14230 } else {
14231 cursor->max_stride = i9xx_cursor_max_stride;
14232 cursor->update_plane = i9xx_update_cursor;
14233 cursor->disable_plane = i9xx_disable_cursor;
14234 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14235 cursor->check_plane = i9xx_check_cursor;
14236 }
14237
14238 cursor->cursor.base = ~0;
14239 cursor->cursor.cntl = ~0;
14240
14241 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14242 cursor->cursor.size = ~0;
14243
14244 possible_crtcs = BIT(pipe);
14245
14246 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14247 possible_crtcs, &intel_cursor_plane_funcs,
14248 intel_cursor_formats,
14249 ARRAY_SIZE(intel_cursor_formats),
14250 cursor_format_modifiers,
14251 DRM_PLANE_TYPE_CURSOR,
14252 "cursor %c", pipe_name(pipe));
14253 if (ret)
14254 goto fail;
14255
14256 if (INTEL_GEN(dev_priv) >= 4)
14257 drm_plane_create_rotation_property(&cursor->base,
14258 DRM_MODE_ROTATE_0,
14259 DRM_MODE_ROTATE_0 |
14260 DRM_MODE_ROTATE_180);
14261
14262 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14263
14264 return cursor;
14265
14266 fail:
14267 intel_plane_free(cursor);
14268
14269 return ERR_PTR(ret);
14270 }
14271
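/*
 * Reset the scaler bookkeeping for this crtc: read the scaler count
 * from the runtime info, mark every scaler as unused and clear the
 * crtc scaler id.
 */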
14272 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14273 struct intel_crtc_state *crtc_state)
14274 {
14275 struct intel_crtc_scaler_state *scaler_state =
14276 &crtc_state->scaler_state;
14277 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14278 int i;
14279
14280 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14281 if (!crtc->num_scalers)
14282 return;
14283
14284 for (i = 0; i < crtc->num_scalers; i++) {
14285 struct intel_scaler *scaler = &scaler_state->scalers[i];
14286
14287 scaler->in_use = 0;
14288 scaler->mode = 0;
14289 }
14290
14291 scaler_state->scaler_id = -1;
14292 }
14293
14294 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14295 {
14296 struct intel_crtc *intel_crtc;
14297 struct intel_crtc_state *crtc_state = NULL;
14298 struct intel_plane *primary = NULL;
14299 struct intel_plane *cursor = NULL;
14300 int sprite, ret;
14301
14302 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14303 if (!intel_crtc)
14304 return -ENOMEM;
14305
14306 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14307 if (!crtc_state) {
14308 ret = -ENOMEM;
14309 goto fail;
14310 }
14311 intel_crtc->config = crtc_state;
14312 intel_crtc->base.state = &crtc_state->base;
14313 crtc_state->base.crtc = &intel_crtc->base;
14314
14315 primary = intel_primary_plane_create(dev_priv, pipe);
14316 if (IS_ERR(primary)) {
14317 ret = PTR_ERR(primary);
14318 goto fail;
14319 }
14320 intel_crtc->plane_ids_mask |= BIT(primary->id);
14321
14322 for_each_sprite(dev_priv, pipe, sprite) {
14323 struct intel_plane *plane;
14324
14325 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14326 if (IS_ERR(plane)) {
14327 ret = PTR_ERR(plane);
14328 goto fail;
14329 }
14330 intel_crtc->plane_ids_mask |= BIT(plane->id);
14331 }
14332
14333 cursor = intel_cursor_plane_create(dev_priv, pipe);
14334 if (IS_ERR(cursor)) {
14335 ret = PTR_ERR(cursor);
14336 goto fail;
14337 }
14338 intel_crtc->plane_ids_mask |= BIT(cursor->id);
14339
14340 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14341 &primary->base, &cursor->base,
14342 &intel_crtc_funcs,
14343 "pipe %c", pipe_name(pipe));
14344 if (ret)
14345 goto fail;
14346
14347 intel_crtc->pipe = pipe;
14348
14349 /* initialize shared scalers */
14350 intel_crtc_init_scalers(intel_crtc, crtc_state);
14351
14352 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14353 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14354 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14355
14356 if (INTEL_GEN(dev_priv) < 9) {
14357 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14358
14359 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14360 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14361 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14362 }
14363
14364 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14365
14366 intel_color_init(intel_crtc);
14367
14368 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14369
14370 return 0;
14371
14372 fail:
14373 /*
14374 * drm_mode_config_cleanup() will free up any
14375 * crtcs/planes already initialized.
14376 */
14377 kfree(crtc_state);
14378 kfree(intel_crtc);
14379
14380 return ret;
14381 }
14382
14383 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14384 struct drm_file *file)
14385 {
14386 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14387 struct drm_crtc *drmmode_crtc;
14388 struct intel_crtc *crtc;
14389
14390 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14391 if (!drmmode_crtc)
14392 return -ENOENT;
14393
14394 crtc = to_intel_crtc(drmmode_crtc);
14395 pipe_from_crtc_id->pipe = crtc->pipe;
14396
14397 return 0;
14398 }
14399
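/*
 * Build the possible_clones mask for an encoder: one bit per encoder
 * on the device, set when that encoder can be cloned with this one.
 */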
14400 static int intel_encoder_clones(struct intel_encoder *encoder)
14401 {
14402 struct drm_device *dev = encoder->base.dev;
14403 struct intel_encoder *source_encoder;
14404 int index_mask = 0;
14405 int entry = 0;
14406
14407 for_each_intel_encoder(dev, source_encoder) {
14408 if (encoders_cloneable(encoder, source_encoder))
14409 index_mask |= (1 << entry);
14410
14411 entry++;
14412 }
14413
14414 return index_mask;
14415 }
14416
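/*
 * eDP on port A requires a mobile part with the DP_A strap set;
 * on gen5 it can additionally be fused off.
 */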
14417 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14418 {
14419 if (!IS_MOBILE(dev_priv))
14420 return false;
14421
14422 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14423 return false;
14424
14425 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14426 return false;
14427
14428 return true;
14429 }
14430
14431 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14432 {
14433 if (INTEL_GEN(dev_priv) >= 9)
14434 return false;
14435
14436 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14437 return false;
14438
14439 if (HAS_PCH_LPT_H(dev_priv) &&
14440 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14441 return false;
14442
14443 /* DDI E can't be used if DDI A requires 4 lanes */
14444 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14445 return false;
14446
14447 if (!dev_priv->vbt.int_crt_support)
14448 return false;
14449
14450 return true;
14451 }
14452
14453 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14454 {
14455 int pps_num;
14456 int pps_idx;
14457
14458 if (HAS_DDI(dev_priv))
14459 return;
14460 /*
14461 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14462 * everywhere registers can be write protected.
14463 */
14464 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14465 pps_num = 2;
14466 else
14467 pps_num = 1;
14468
14469 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14470 u32 val = I915_READ(PP_CONTROL(pps_idx));
14471
14472 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14473 I915_WRITE(PP_CONTROL(pps_idx), val);
14474 }
14475 }
14476
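/* Pick the PPS MMIO base for this platform and apply the unlock w/a. */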
14477 static void intel_pps_init(struct drm_i915_private *dev_priv)
14478 {
14479 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14480 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14481 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14482 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14483 else
14484 dev_priv->pps_mmio_base = PPS_BASE;
14485
14486 intel_pps_unlock_regs_wa(dev_priv);
14487 }
14488
14489 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14490 {
14491 struct intel_encoder *encoder;
14492 bool dpd_is_edp = false;
14493
14494 intel_pps_init(dev_priv);
14495
14496 if (!HAS_DISPLAY(dev_priv))
14497 return;
14498
14499 if (IS_ICELAKE(dev_priv)) {
14500 intel_ddi_init(dev_priv, PORT_A);
14501 intel_ddi_init(dev_priv, PORT_B);
14502 intel_ddi_init(dev_priv, PORT_C);
14503 intel_ddi_init(dev_priv, PORT_D);
14504 intel_ddi_init(dev_priv, PORT_E);
14505 /*
14506 * On some ICL SKUs port F is not present. No strap bits for
14507 * this, so rely on VBT.
14508 * Work around broken VBTs on SKUs known to have no port F.
14509 */
14510 if (IS_ICL_WITH_PORT_F(dev_priv) &&
14511 intel_bios_is_port_present(dev_priv, PORT_F))
14512 intel_ddi_init(dev_priv, PORT_F);
14513
14514 icl_dsi_init(dev_priv);
14515 } else if (IS_GEN9_LP(dev_priv)) {
14516 /*
14517 * FIXME: Broxton doesn't support port detection via the
14518 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14519 * detect the ports.
14520 */
14521 intel_ddi_init(dev_priv, PORT_A);
14522 intel_ddi_init(dev_priv, PORT_B);
14523 intel_ddi_init(dev_priv, PORT_C);
14524
14525 vlv_dsi_init(dev_priv);
14526 } else if (HAS_DDI(dev_priv)) {
14527 int found;
14528
14529 if (intel_ddi_crt_present(dev_priv))
14530 intel_crt_init(dev_priv);
14531
14532 /*
14533 * Haswell uses DDI functions to detect digital outputs.
14534 * On SKL pre-D0 the strap isn't connected, so we assume
14535 * it's there.
14536 */
14537 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14538 /* WaIgnoreDDIAStrap: skl */
14539 if (found || IS_GEN9_BC(dev_priv))
14540 intel_ddi_init(dev_priv, PORT_A);
14541
14542 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
14543 * register */
14544 found = I915_READ(SFUSE_STRAP);
14545
14546 if (found & SFUSE_STRAP_DDIB_DETECTED)
14547 intel_ddi_init(dev_priv, PORT_B);
14548 if (found & SFUSE_STRAP_DDIC_DETECTED)
14549 intel_ddi_init(dev_priv, PORT_C);
14550 if (found & SFUSE_STRAP_DDID_DETECTED)
14551 intel_ddi_init(dev_priv, PORT_D);
14552 if (found & SFUSE_STRAP_DDIF_DETECTED)
14553 intel_ddi_init(dev_priv, PORT_F);
14554 /*
14555 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14556 */
14557 if (IS_GEN9_BC(dev_priv) &&
14558 intel_bios_is_port_present(dev_priv, PORT_E))
14559 intel_ddi_init(dev_priv, PORT_E);
14560
14561 } else if (HAS_PCH_SPLIT(dev_priv)) {
14562 int found;
14563
14564 /*
14565 * intel_edp_init_connector() depends on this completing first,
14566 * to prevent the registration of both eDP and LVDS and the
14567 * incorrect sharing of the PPS.
14568 */
14569 intel_lvds_init(dev_priv);
14570 intel_crt_init(dev_priv);
14571
14572 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
14573
14574 if (ilk_has_edp_a(dev_priv))
14575 intel_dp_init(dev_priv, DP_A, PORT_A);
14576
14577 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14578 /* PCH SDVOB multiplex with HDMIB */
14579 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
14580 if (!found)
14581 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
14582 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14583 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
14584 }
14585
14586 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14587 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
14588
14589 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14590 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
14591
14592 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14593 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
14594
14595 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14596 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
14597 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14598 bool has_edp, has_port;
14599
14600 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
14601 intel_crt_init(dev_priv);
14602
14603 /*
14604 * The DP_DETECTED bit is the latched state of the DDC
14605 * SDA pin at boot. However since eDP doesn't require DDC
14606 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14607 * eDP ports may have been muxed to an alternate function.
14608 * Thus we can't rely on the DP_DETECTED bit alone to
14609 * detect eDP ports; consult the VBT as well as
14610 * DP_DETECTED.
14611 *
14612 * Sadly the straps seem to be missing sometimes even for HDMI
14613 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
14614 * and the VBT for the presence of the port. Additionally we can't
14615 * trust the port type the VBT declares, as we've seen at least
14616 * HDMI ports that the VBT claims are DP or eDP.
14617 */
14618 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
14619 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14620 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14621 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
14622 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14623 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
14624
14625 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
14626 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14627 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14628 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
14629 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14630 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
14631
14632 if (IS_CHERRYVIEW(dev_priv)) {
14633 /*
14634 * eDP not supported on port D,
14635 * so no need to worry about it
14636 */
14637 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14638 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14639 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
14640 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14641 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
14642 }
14643
14644 vlv_dsi_init(dev_priv);
14645 } else if (IS_PINEVIEW(dev_priv)) {
14646 intel_lvds_init(dev_priv);
14647 intel_crt_init(dev_priv);
14648 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
14649 bool found = false;
14650
14651 if (IS_MOBILE(dev_priv))
14652 intel_lvds_init(dev_priv);
14653
14654 intel_crt_init(dev_priv);
14655
14656 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14657 DRM_DEBUG_KMS("probing SDVOB\n");
14658 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
14659 if (!found && IS_G4X(dev_priv)) {
14660 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14661 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
14662 }
14663
14664 if (!found && IS_G4X(dev_priv))
14665 intel_dp_init(dev_priv, DP_B, PORT_B);
14666 }
14667
14668 /* Before G4X, SDVOC doesn't have its own detect register */
14669
14670 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14671 DRM_DEBUG_KMS("probing SDVOC\n");
14672 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
14673 }
14674
14675 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14677 if (IS_G4X(dev_priv)) {
14678 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14679 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
14680 }
14681 if (IS_G4X(dev_priv))
14682 intel_dp_init(dev_priv, DP_C, PORT_C);
14683 }
14684
14685 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
14686 intel_dp_init(dev_priv, DP_D, PORT_D);
14687
14688 if (SUPPORTS_TV(dev_priv))
14689 intel_tv_init(dev_priv);
14690 } else if (IS_GEN(dev_priv, 2)) {
14691 if (IS_I85X(dev_priv))
14692 intel_lvds_init(dev_priv);
14693
14694 intel_crt_init(dev_priv);
14695 intel_dvo_init(dev_priv);
14696 }
14697
14698 intel_psr_init(dev_priv);
14699
14700 for_each_intel_encoder(&dev_priv->drm, encoder) {
14701 encoder->base.possible_crtcs = encoder->crtc_mask;
14702 encoder->base.possible_clones =
14703 intel_encoder_clones(encoder);
14704 }
14705
14706 intel_init_pch_refclk(dev_priv);
14707
14708 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
14709 }
14710
14711 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14712 {
14713 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14714 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14715
14716 drm_framebuffer_cleanup(fb);
14717
14718 i915_gem_object_lock(obj);
14719 WARN_ON(!obj->framebuffer_references--);
14720 i915_gem_object_unlock(obj);
14721
14722 i915_gem_object_put(obj);
14723
14724 kfree(intel_fb);
14725 }
14726
14727 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14728 struct drm_file *file,
14729 unsigned int *handle)
14730 {
14731 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14732
14733 if (obj->userptr.mm) {
14734 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14735 return -EINVAL;
14736 }
14737
14738 return drm_gem_handle_create(file, &obj->base, handle);
14739 }
14740
14741 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14742 struct drm_file *file,
14743 unsigned flags, unsigned color,
14744 struct drm_clip_rect *clips,
14745 unsigned num_clips)
14746 {
14747 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14748
14749 i915_gem_object_flush_if_display(obj);
14750 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14751
14752 return 0;
14753 }
14754
14755 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14756 .destroy = intel_user_framebuffer_destroy,
14757 .create_handle = intel_user_framebuffer_create_handle,
14758 .dirty = intel_user_framebuffer_dirty,
14759 };
14760
14761 static u32
14762 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14763 u32 pixel_format, u64 fb_modifier)
14764 {
14765 struct intel_crtc *crtc;
14766 struct intel_plane *plane;
14767
14768 /*
14769 * We assume the primary plane for pipe A has
14770 * the highest stride limits of them all.
14771 */
14772 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14773 plane = to_intel_plane(crtc->base.primary);
14774
14775 return plane->max_stride(plane, pixel_format, fb_modifier,
14776 DRM_MODE_ROTATE_0);
14777 }
14778
14779 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14780 struct drm_i915_gem_object *obj,
14781 struct drm_mode_fb_cmd2 *mode_cmd)
14782 {
14783 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14784 struct drm_framebuffer *fb = &intel_fb->base;
14785 u32 pitch_limit;
14786 unsigned int tiling, stride;
14787 int ret = -EINVAL;
14788 int i;
14789
14790 i915_gem_object_lock(obj);
14791 obj->framebuffer_references++;
14792 tiling = i915_gem_object_get_tiling(obj);
14793 stride = i915_gem_object_get_stride(obj);
14794 i915_gem_object_unlock(obj);
14795
14796 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14797 /*
14798 * If there's a fence, enforce that
14799 * the fb modifier and tiling mode match.
14800 */
14801 if (tiling != I915_TILING_NONE &&
14802 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14803 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14804 goto err;
14805 }
14806 } else {
14807 if (tiling == I915_TILING_X) {
14808 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14809 } else if (tiling == I915_TILING_Y) {
14810 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14811 goto err;
14812 }
14813 }
14814
14815 if (!drm_any_plane_has_format(&dev_priv->drm,
14816 mode_cmd->pixel_format,
14817 mode_cmd->modifier[0])) {
14818 struct drm_format_name_buf format_name;
14819
14820 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
14821 drm_get_format_name(mode_cmd->pixel_format,
14822 &format_name),
14823 mode_cmd->modifier[0]);
14824 goto err;
14825 }
14826
14827 /*
14828 * gen2/3 display engine uses the fence if present,
14829 * so the tiling mode must match the fb modifier exactly.
14830 */
14831 if (INTEL_GEN(dev_priv) < 4 &&
14832 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14833 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14834 goto err;
14835 }
14836
14837 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
14838 mode_cmd->modifier[0]);
14839 if (mode_cmd->pitches[0] > pitch_limit) {
14840 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14841 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14842 "tiled" : "linear",
14843 mode_cmd->pitches[0], pitch_limit);
14844 goto err;
14845 }
14846
14847 /*
14848 * If there's a fence, enforce that
14849 * the fb pitch and fence stride match.
14850 */
14851 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14852 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14853 mode_cmd->pitches[0], stride);
14854 goto err;
14855 }
14856
14857 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14858 if (mode_cmd->offsets[0] != 0)
14859 goto err;
14860
14861 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
14862
14863 for (i = 0; i < fb->format->num_planes; i++) {
14864 u32 stride_alignment;
14865
14866 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14867 DRM_DEBUG_KMS("bad plane %d handle\n", i);
14868 goto err;
14869 }
14870
14871 stride_alignment = intel_fb_stride_alignment(fb, i);
14872
14873 /*
14874 * Display WA #0531: skl,bxt,kbl,glk
14875 *
14876 * Render decompression and plane width > 3840
14877 * combined with horizontal panning requires the
14878 * plane stride to be a multiple of 4. We'll just
14879 * require the entire fb to accommodate that to avoid
14880 * potential runtime errors at plane configuration time.
14881 */
14882 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
14883 is_ccs_modifier(fb->modifier))
14884 stride_alignment *= 4;
14885
14886 if (fb->pitches[i] & (stride_alignment - 1)) {
14887 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14888 i, fb->pitches[i], stride_alignment);
14889 goto err;
14890 }
14891
14892 fb->obj[i] = &obj->base;
14893 }
14894
14895 ret = intel_fill_fb_info(dev_priv, fb);
14896 if (ret)
14897 goto err;
14898
14899 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
14900 if (ret) {
14901 DRM_ERROR("framebuffer init failed %d\n", ret);
14902 goto err;
14903 }
14904
14905 return 0;
14906
14907 err:
14908 i915_gem_object_lock(obj);
14909 obj->framebuffer_references--;
14910 i915_gem_object_unlock(obj);
14911 return ret;
14912 }
14913
14914 static struct drm_framebuffer *
14915 intel_user_framebuffer_create(struct drm_device *dev,
14916 struct drm_file *filp,
14917 const struct drm_mode_fb_cmd2 *user_mode_cmd)
14918 {
14919 struct drm_framebuffer *fb;
14920 struct drm_i915_gem_object *obj;
14921 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14922
14923 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14924 if (!obj)
14925 return ERR_PTR(-ENOENT);
14926
14927 fb = intel_framebuffer_create(obj, &mode_cmd);
14928 if (IS_ERR(fb))
14929 i915_gem_object_put(obj);
14930
14931 return fb;
14932 }
14933
14934 static void intel_atomic_state_free(struct drm_atomic_state *state)
14935 {
14936 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14937
14938 drm_atomic_state_default_release(state);
14939
14940 i915_sw_fence_fini(&intel_state->commit_ready);
14941
14942 kfree(state);
14943 }
14944
14945 static enum drm_mode_status
14946 intel_mode_valid(struct drm_device *dev,
14947 const struct drm_display_mode *mode)
14948 {
14949 struct drm_i915_private *dev_priv = to_i915(dev);
14950 int hdisplay_max, htotal_max;
14951 int vdisplay_max, vtotal_max;
14952
14953 /*
14954 * Can't reject DBLSCAN here because Xorg ddxen can add piles
14955 * of DBLSCAN modes to the output's mode list when they detect
14956 * the scaling mode property on the connector. And they don't
14957 * ask the kernel to validate those modes in any way until
14958 * modeset time at which point the client gets a protocol error.
14959 * So in order to not upset those clients we silently ignore the
14960 * DBLSCAN flag on such connectors. For other connectors we will
14961 * reject modes with the DBLSCAN flag in encoder->compute_config().
14962 * And we always reject DBLSCAN modes in connector->mode_valid()
14963 * as we never want such modes on the connector's mode list.
14964 */
14965
14966 if (mode->vscan > 1)
14967 return MODE_NO_VSCAN;
14968
14969 if (mode->flags & DRM_MODE_FLAG_HSKEW)
14970 return MODE_H_ILLEGAL;
14971
14972 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14973 DRM_MODE_FLAG_NCSYNC |
14974 DRM_MODE_FLAG_PCSYNC))
14975 return MODE_HSYNC;
14976
14977 if (mode->flags & (DRM_MODE_FLAG_BCAST |
14978 DRM_MODE_FLAG_PIXMUX |
14979 DRM_MODE_FLAG_CLKDIV2))
14980 return MODE_BAD;
14981
14982 if (INTEL_GEN(dev_priv) >= 9 ||
14983 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14984 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14985 vdisplay_max = 4096;
14986 htotal_max = 8192;
14987 vtotal_max = 8192;
14988 } else if (INTEL_GEN(dev_priv) >= 3) {
14989 hdisplay_max = 4096;
14990 vdisplay_max = 4096;
14991 htotal_max = 8192;
14992 vtotal_max = 8192;
14993 } else {
14994 hdisplay_max = 2048;
14995 vdisplay_max = 2048;
14996 htotal_max = 4096;
14997 vtotal_max = 4096;
14998 }
14999
15000 if (mode->hdisplay > hdisplay_max ||
15001 mode->hsync_start > htotal_max ||
15002 mode->hsync_end > htotal_max ||
15003 mode->htotal > htotal_max)
15004 return MODE_H_ILLEGAL;
15005
15006 if (mode->vdisplay > vdisplay_max ||
15007 mode->vsync_start > vtotal_max ||
15008 mode->vsync_end > vtotal_max ||
15009 mode->vtotal > vtotal_max)
15010 return MODE_V_ILLEGAL;
15011
15012 return MODE_OK;
15013 }
15014
15015 static const struct drm_mode_config_funcs intel_mode_funcs = {
15016 .fb_create = intel_user_framebuffer_create,
15017 .get_format_info = intel_get_format_info,
15018 .output_poll_changed = intel_fbdev_output_poll_changed,
15019 .mode_valid = intel_mode_valid,
15020 .atomic_check = intel_atomic_check,
15021 .atomic_commit = intel_atomic_commit,
15022 .atomic_state_alloc = intel_atomic_state_alloc,
15023 .atomic_state_clear = intel_atomic_state_clear,
15024 .atomic_state_free = intel_atomic_state_free,
15025 };
15026
15027 /**
15028 * intel_init_display_hooks - initialize the display modesetting hooks
15029 * @dev_priv: device private
15030 */
15031 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15032 {
15033 intel_init_cdclk_hooks(dev_priv);
15034
15035 if (INTEL_GEN(dev_priv) >= 9) {
15036 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15037 dev_priv->display.get_initial_plane_config =
15038 skylake_get_initial_plane_config;
15039 dev_priv->display.crtc_compute_clock =
15040 haswell_crtc_compute_clock;
15041 dev_priv->display.crtc_enable = haswell_crtc_enable;
15042 dev_priv->display.crtc_disable = haswell_crtc_disable;
15043 } else if (HAS_DDI(dev_priv)) {
15044 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15045 dev_priv->display.get_initial_plane_config =
15046 i9xx_get_initial_plane_config;
15047 dev_priv->display.crtc_compute_clock =
15048 haswell_crtc_compute_clock;
15049 dev_priv->display.crtc_enable = haswell_crtc_enable;
15050 dev_priv->display.crtc_disable = haswell_crtc_disable;
15051 } else if (HAS_PCH_SPLIT(dev_priv)) {
15052 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15053 dev_priv->display.get_initial_plane_config =
15054 i9xx_get_initial_plane_config;
15055 dev_priv->display.crtc_compute_clock =
15056 ironlake_crtc_compute_clock;
15057 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15058 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15059 } else if (IS_CHERRYVIEW(dev_priv)) {
15060 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15061 dev_priv->display.get_initial_plane_config =
15062 i9xx_get_initial_plane_config;
15063 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15064 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15065 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15066 } else if (IS_VALLEYVIEW(dev_priv)) {
15067 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15068 dev_priv->display.get_initial_plane_config =
15069 i9xx_get_initial_plane_config;
15070 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15071 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15072 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15073 } else if (IS_G4X(dev_priv)) {
15074 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15075 dev_priv->display.get_initial_plane_config =
15076 i9xx_get_initial_plane_config;
15077 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15078 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15079 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15080 } else if (IS_PINEVIEW(dev_priv)) {
15081 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15082 dev_priv->display.get_initial_plane_config =
15083 i9xx_get_initial_plane_config;
15084 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15085 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15086 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15087 } else if (!IS_GEN(dev_priv, 2)) {
15088 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15089 dev_priv->display.get_initial_plane_config =
15090 i9xx_get_initial_plane_config;
15091 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15092 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15093 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15094 } else {
15095 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15096 dev_priv->display.get_initial_plane_config =
15097 i9xx_get_initial_plane_config;
15098 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15099 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15100 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15101 }
15102
15103 if (IS_GEN(dev_priv, 5)) {
15104 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15105 } else if (IS_GEN(dev_priv, 6)) {
15106 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15107 } else if (IS_IVYBRIDGE(dev_priv)) {
15108 /* FIXME: detect B0+ stepping and use auto training */
15109 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15110 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15111 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15112 }
15113
15114 if (INTEL_GEN(dev_priv) >= 9)
15115 dev_priv->display.update_crtcs = skl_update_crtcs;
15116 else
15117 dev_priv->display.update_crtcs = intel_update_crtcs;
15118 }
15119
15120 /* Disable the VGA plane that we never use */
15121 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15122 {
15123 struct pci_dev *pdev = dev_priv->drm.pdev;
15124 u8 sr1;
15125 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15126
15127 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15128 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
15129 outb(SR01, VGA_SR_INDEX);
15130 sr1 = inb(VGA_SR_DATA);
15131 outb(sr1 | 1<<5, VGA_SR_DATA);
15132 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15133 udelay(300);
15134
15135 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15136 POSTING_READ(vga_reg);
15137 }
15138
15139 void intel_modeset_init_hw(struct drm_device *dev)
15140 {
15141 struct drm_i915_private *dev_priv = to_i915(dev);
15142
15143 intel_update_cdclk(dev_priv);
15144 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15145 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15146 }
15147
15148 /*
15149 * Calculate what we think the watermarks should be for the state we've read
15150 * out of the hardware and then immediately program those watermarks so that
15151 * we ensure the hardware settings match our internal state.
15152 *
15153 * We can calculate what we think WM's should be by creating a duplicate of the
15154 * current state (which was constructed during hardware readout) and running it
15155 * through the atomic check code to calculate new watermark values in the
15156 * state object.
15157 */
15158 static void sanitize_watermarks(struct drm_device *dev)
15159 {
15160 struct drm_i915_private *dev_priv = to_i915(dev);
15161 struct drm_atomic_state *state;
15162 struct intel_atomic_state *intel_state;
15163 struct drm_crtc *crtc;
15164 struct drm_crtc_state *cstate;
15165 struct drm_modeset_acquire_ctx ctx;
15166 int ret;
15167 int i;
15168
15169 /* Only supported on platforms that use atomic watermark design */
15170 if (!dev_priv->display.optimize_watermarks)
15171 return;
15172
15173 /*
15174 * We need to hold connection_mutex before calling duplicate_state so
15175 * that the connector loop is protected.
15176 */
15177 drm_modeset_acquire_init(&ctx, 0);
15178 retry:
15179 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15180 if (ret == -EDEADLK) {
15181 drm_modeset_backoff(&ctx);
15182 goto retry;
15183 } else if (WARN_ON(ret)) {
15184 goto fail;
15185 }
15186
15187 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15188 if (WARN_ON(IS_ERR(state)))
15189 goto fail;
15190
15191 intel_state = to_intel_atomic_state(state);
15192
15193 /*
15194 * Hardware readout is the only time we don't want to calculate
15195 * intermediate watermarks (since we don't trust the current
15196 * watermarks).
15197 */
15198 if (!HAS_GMCH(dev_priv))
15199 intel_state->skip_intermediate_wm = true;
15200
15201 ret = intel_atomic_check(dev, state);
15202 if (ret) {
15203 /*
15204 * If we fail here, it means that the hardware appears to be
15205 * programmed in a way that shouldn't be possible, given our
15206 * understanding of watermark requirements. This might mean a
15207 * mistake in the hardware readout code or a mistake in the
15208 * watermark calculations for a given platform. Raise a WARN
15209 * so that this is noticeable.
15210 *
15211 * If this actually happens, we'll have to just leave the
15212 * BIOS-programmed watermarks untouched and hope for the best.
15213 */
15214 WARN(true, "Could not determine valid watermarks for inherited state\n");
15215 goto put_state;
15216 }
15217
15218 /* Write calculated watermark values back */
15219 for_each_new_crtc_in_state(state, crtc, cstate, i) {
15220 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15221
15222 cs->wm.need_postvbl_update = true;
15223 dev_priv->display.optimize_watermarks(intel_state, cs);
15224
15225 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15226 }
15227
15228 put_state:
15229 drm_atomic_state_put(state);
15230 fail:
15231 drm_modeset_drop_locks(&ctx);
15232 drm_modeset_acquire_fini(&ctx);
15233 }
15234
15235 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15236 {
15237 if (IS_GEN(dev_priv, 5)) {
15238 u32 fdi_pll_clk =
15239 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15240
15241 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15242 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15243 dev_priv->fdi_pll_freq = 270000;
15244 } else {
15245 return;
15246 }
15247
15248 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15249 }
15250
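/*
 * Commit the state we just read out of the hardware once at probe, so
 * that all active planes recompute their atomic state up front; see
 * the call site in intel_modeset_init() below.
 */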
15251 static int intel_initial_commit(struct drm_device *dev)
15252 {
15253 struct drm_atomic_state *state = NULL;
15254 struct drm_modeset_acquire_ctx ctx;
15255 struct drm_crtc *crtc;
15256 struct drm_crtc_state *crtc_state;
15257 int ret = 0;
15258
15259 state = drm_atomic_state_alloc(dev);
15260 if (!state)
15261 return -ENOMEM;
15262
15263 drm_modeset_acquire_init(&ctx, 0);
15264
15265 retry:
15266 state->acquire_ctx = &ctx;
15267
15268 drm_for_each_crtc(crtc, dev) {
15269 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15270 if (IS_ERR(crtc_state)) {
15271 ret = PTR_ERR(crtc_state);
15272 goto out;
15273 }
15274
15275 if (crtc_state->active) {
15276 ret = drm_atomic_add_affected_planes(state, crtc);
15277 if (ret)
15278 goto out;
15279
15280 /*
15281 * FIXME hack to force a LUT update to avoid the
15282 * plane update forcing the pipe gamma on without
15283 * having a proper LUT loaded. Remove once we
15284 * have readout for pipe gamma enable.
15285 */
15286 crtc_state->color_mgmt_changed = true;
15287 }
15288 }
15289
15290 ret = drm_atomic_commit(state);
15291
15292 out:
15293 if (ret == -EDEADLK) {
15294 drm_atomic_state_clear(state);
15295 drm_modeset_backoff(&ctx);
15296 goto retry;
15297 }
15298
15299 drm_atomic_state_put(state);
15300
15301 drm_modeset_drop_locks(&ctx);
15302 drm_modeset_acquire_fini(&ctx);
15303
15304 return ret;
15305 }
15306
15307 int intel_modeset_init(struct drm_device *dev)
15308 {
15309 struct drm_i915_private *dev_priv = to_i915(dev);
15310 struct i915_ggtt *ggtt = &dev_priv->ggtt;
15311 enum pipe pipe;
15312 struct intel_crtc *crtc;
15313 int ret;
15314
15315 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15316
15317 drm_mode_config_init(dev);
15318
15319 dev->mode_config.min_width = 0;
15320 dev->mode_config.min_height = 0;
15321
15322 dev->mode_config.preferred_depth = 24;
15323 dev->mode_config.prefer_shadow = 1;
15324
15325 dev->mode_config.allow_fb_modifiers = true;
15326
15327 dev->mode_config.funcs = &intel_mode_funcs;
15328
15329 init_llist_head(&dev_priv->atomic_helper.free_list);
15330 INIT_WORK(&dev_priv->atomic_helper.free_work,
15331 intel_atomic_helper_free_state_worker);
15332
15333 intel_init_quirks(dev_priv);
15334
15335 intel_fbc_init(dev_priv);
15336
15337 intel_init_pm(dev_priv);
15338
15339 /*
15340 * There may be no VBT; if the BIOS enabled SSC we can just
15341 * keep using it to avoid unnecessary flicker. Whereas if the
15342 * BIOS isn't using it, don't assume it will work even if the VBT
15343 * indicates as much.
15344 */
15345 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15346 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15347 DREF_SSC1_ENABLE);
15348
15349 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15350 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15351 bios_lvds_use_ssc ? "en" : "dis",
15352 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15353 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15354 }
15355 }
15356
15357 /* maximum framebuffer dimensions */
15358 if (IS_GEN(dev_priv, 2)) {
15359 dev->mode_config.max_width = 2048;
15360 dev->mode_config.max_height = 2048;
15361 } else if (IS_GEN(dev_priv, 3)) {
15362 dev->mode_config.max_width = 4096;
15363 dev->mode_config.max_height = 4096;
15364 } else {
15365 dev->mode_config.max_width = 8192;
15366 dev->mode_config.max_height = 8192;
15367 }
15368
15369 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15370 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15371 dev->mode_config.cursor_height = 1023;
15372 } else if (IS_GEN(dev_priv, 2)) {
15373 dev->mode_config.cursor_width = 64;
15374 dev->mode_config.cursor_height = 64;
15375 } else {
15376 dev->mode_config.cursor_width = 256;
15377 dev->mode_config.cursor_height = 256;
15378 }
15379
15380 dev->mode_config.fb_base = ggtt->gmadr.start;
15381
15382 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15383 INTEL_INFO(dev_priv)->num_pipes,
15384 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15385
15386 for_each_pipe(dev_priv, pipe) {
15387 ret = intel_crtc_init(dev_priv, pipe);
15388 if (ret) {
15389 drm_mode_config_cleanup(dev);
15390 return ret;
15391 }
15392 }
15393
15394 intel_shared_dpll_init(dev);
15395 intel_update_fdi_pll_freq(dev_priv);
15396
15397 intel_update_czclk(dev_priv);
15398 intel_modeset_init_hw(dev);
15399
15400 if (dev_priv->max_cdclk_freq == 0)
15401 intel_update_max_cdclk(dev_priv);
15402
15403 /* Just disable it once at startup */
15404 i915_disable_vga(dev_priv);
15405 intel_setup_outputs(dev_priv);
15406
15407 drm_modeset_lock_all(dev);
15408 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15409 drm_modeset_unlock_all(dev);
15410
15411 for_each_intel_crtc(dev, crtc) {
15412 struct intel_initial_plane_config plane_config = {};
15413
15414 if (!crtc->active)
15415 continue;
15416
15417 /*
15418 * Note that reserving the BIOS fb up front prevents us
15419 * from stuffing other stolen allocations like the ring
15420 * on top. This prevents some ugliness at boot time, and
15421 * can even allow for smooth boot transitions if the BIOS
15422 * fb is large enough for the active pipe configuration.
15423 */
15424 dev_priv->display.get_initial_plane_config(crtc,
15425 &plane_config);
15426
15427 /*
15428 * If the fb is shared between multiple heads, we'll
15429 * just get the first one.
15430 */
15431 intel_find_initial_plane_obj(crtc, &plane_config);
15432 }
15433
15434 /*
15435 * Make sure hardware watermarks really match the state we read out.
15436 * Note that we need to do this after reconstructing the BIOS fb's
15437 * since the watermark calculation done here will use pstate->fb.
15438 */
15439 if (!HAS_GMCH(dev_priv))
15440 sanitize_watermarks(dev);
15441
15442 /*
15443 * Force all active planes to recompute their states, so that on
15444 * mode_setcrtc after probe all the intel_plane_state variables
15445 * are already calculated and there are no assert_plane warnings
15446 * during bootup.
15447 */
15448 ret = intel_initial_commit(dev);
15449 if (ret)
15450 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15451
15452 return 0;
15453 }
15454
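/*
 * Enable the pipe with a minimal fixed 640x480@60 mode; used by the
 * force quirk handling (see the debug message below) where the pipe
 * must be kept running.
 */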
15455 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15456 {
15457 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15458 /* 640x480@60Hz, ~25175 kHz */
15459 struct dpll clock = {
15460 .m1 = 18,
15461 .m2 = 7,
15462 .p1 = 13,
15463 .p2 = 4,
15464 .n = 2,
15465 };
15466 u32 dpll, fp;
15467 int i;
15468
15469 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
15470
15471 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
15472 pipe_name(pipe), clock.vco, clock.dot);
15473
15474 fp = i9xx_dpll_compute_fp(&clock);
15475 dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
15476 DPLL_VGA_MODE_DIS |
15477 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
15478 PLL_P2_DIVIDE_BY_4 |
15479 PLL_REF_INPUT_DREFCLK |
15480 DPLL_VCO_ENABLE;
15481
15482 I915_WRITE(FP0(pipe), fp);
15483 I915_WRITE(FP1(pipe), fp);
15484
15485 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
15486 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
15487 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
15488 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
15489 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
15490 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
15491 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
15492
15493 /*
15494 * Apparently we need to have VGA mode enabled prior to changing
15495 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
15496 * dividers, even though the register value does change.
15497 */
15498 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
15499 I915_WRITE(DPLL(pipe), dpll);
15500
15501 /* Wait for the clocks to stabilize. */
15502 POSTING_READ(DPLL(pipe));
15503 udelay(150);
15504
15505 /* The pixel multiplier can only be updated once the
15506 * DPLL is enabled and the clocks are stable.
15507 *
15508 * So write it again.
15509 */
15510 I915_WRITE(DPLL(pipe), dpll);
15511
15512 /* We do this three times for luck */
15513 for (i = 0; i < 3; i++) {
15514 I915_WRITE(DPLL(pipe), dpll);
15515 POSTING_READ(DPLL(pipe));
15516 udelay(150); /* wait for warmup */
15517 }
15518
15519 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
15520 POSTING_READ(PIPECONF(pipe));
15521
15522 intel_wait_for_pipe_scanline_moving(crtc);
15523 }
15524
15525 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15526 {
15527 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15528
15529 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
15530 pipe_name(pipe));
15531
15532 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
15533 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
15534 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
15535 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
15536 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
15537
15538 I915_WRITE(PIPECONF(pipe), 0);
15539 POSTING_READ(PIPECONF(pipe));
15540
15541 intel_wait_for_pipe_scanline_stopped(crtc);
15542
15543 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
15544 POSTING_READ(DPLL(pipe));
15545 }
15546
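/*
 * On pre-gen4 hardware the primary planes are not tied to a fixed
 * pipe, so the BIOS may have left a plane feeding a pipe other than
 * the one we chose for it; disable any such plane.
 */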
15547 static void
15548 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15549 {
15550 struct intel_crtc *crtc;
15551
15552 if (INTEL_GEN(dev_priv) >= 4)
15553 return;
15554
15555 for_each_intel_crtc(&dev_priv->drm, crtc) {
15556 struct intel_plane *plane =
15557 to_intel_plane(crtc->base.primary);
15558 struct intel_crtc *plane_crtc;
15559 enum pipe pipe;
15560
15561 if (!plane->get_hw_state(plane, &pipe))
15562 continue;
15563
15564 if (pipe == crtc->pipe)
15565 continue;
15566
15567 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15568 plane->base.base.id, plane->base.name);
15569
15570 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15571 intel_plane_disable_noatomic(plane_crtc, plane);
15572 }
15573 }
15574
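/* Return true if at least one encoder is currently attached to the crtc. */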
15575 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15576 {
15577 struct drm_device *dev = crtc->base.dev;
15578 struct intel_encoder *encoder;
15579
15580 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15581 return true;
15582
15583 return false;
15584 }
15585
15586 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15587 {
15588 struct drm_device *dev = encoder->base.dev;
15589 struct intel_connector *connector;
15590
15591 for_each_connector_on_encoder(dev, &encoder->base, connector)
15592 return connector;
15593
15594 return NULL;
15595 }
15596
15597 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
15598 enum pipe pch_transcoder)
15599 {
15600 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15601 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15602 }
15603
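/*
 * Sanitize the crtc state read out from the hardware: clear BIOS frame
 * start delays, disable everything but the primary plane, undo any BIOS
 * background color, shut the pipe down if it has no encoders, and reset
 * the fifo underrun bookkeeping.
 */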
15604 static void intel_sanitize_crtc(struct intel_crtc *crtc,
15605 struct drm_modeset_acquire_ctx *ctx)
15606 {
15607 struct drm_device *dev = crtc->base.dev;
15608 struct drm_i915_private *dev_priv = to_i915(dev);
15609 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
15610 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
15611
15612 /* Clear any frame start delays (used for debugging) left by the BIOS */
15613 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
15614 i915_reg_t reg = PIPECONF(cpu_transcoder);
15615
15616 I915_WRITE(reg,
15617 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15618 }
15619
15620 if (crtc_state->base.active) {
15621 struct intel_plane *plane;
15622
15623 /* Disable everything but the primary plane */
15624 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15625 const struct intel_plane_state *plane_state =
15626 to_intel_plane_state(plane->base.state);
15627
15628 if (plane_state->base.visible &&
15629 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
15630 intel_plane_disable_noatomic(crtc, plane);
15631 }
15632
15633 /*
15634 * Disable any background color set by the BIOS, but enable the
15635 * gamma and CSC to match how we program our planes.
15636 */
15637 if (INTEL_GEN(dev_priv) >= 9)
15638 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
15639 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
15640 SKL_BOTTOM_COLOR_CSC_ENABLE);
15641 }
15642
15643 /* Adjust the state of the output pipe according to whether we
15644 * have active connectors/encoders. */
15645 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
15646 intel_crtc_disable_noatomic(&crtc->base, ctx);
15647
15648 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
15649 /*
15650 * We start out with underrun reporting disabled to avoid races.
15651 * For correct bookkeeping mark this on active crtcs.
15652 *
15653 * Also on gmch platforms we don't have any hardware bits to
15654 * disable the underrun reporting, which means we need to start
15655 * out with underrun reporting disabled also on inactive pipes,
15656 * since otherwise we'll complain about the garbage we read when
15657 * e.g. coming up after runtime pm.
15658 *
15659 * No protection against concurrent access is required - at
15660 * worst a fifo underrun happens which also sets this to false.
15661 */
15662 crtc->cpu_fifo_underrun_disabled = true;
15663 /*
15664 * We track the PCH transcoder underrun reporting state
15665 * within the crtc: the crtc for pipe A houses the underrun
15666 * reporting state for PCH transcoder A, the crtc for pipe B
15667 * houses it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
15668 * and marking underrun reporting as disabled for the non-existing
15669 * PCH transcoders B and C would prevent enabling the south
15670 * error interrupt (see cpt_can_enable_serr_int()).
15671 */
15672 if (has_pch_transcoder(dev_priv, crtc->pipe))
15673 crtc->pch_fifo_underrun_disabled = true;
15674 }
15675 }
15676
15677 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15678 {
15679 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15680
15681 /*
15682 * Some SNB BIOSen (e.g. the ASUS K53SV) are known to misprogram
15683 * the hardware when a high res display is plugged in: the DPLL P
15684 * divider is zero, and the pipe timings are bonkers. We'll
15685 * try to disable everything in that case.
15686 *
15687 * FIXME would be nice to be able to sanitize this state
15688 * without several WARNs, but for now let's take the easy
15689 * road.
15690 */
15691 return IS_GEN(dev_priv, 6) &&
15692 crtc_state->base.active &&
15693 crtc_state->shared_dpll &&
15694 crtc_state->port_clock == 0;
15695 }
15696
15697 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15698 {
15699 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15700 struct intel_connector *connector;
15701 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
15702 struct intel_crtc_state *crtc_state = crtc ?
15703 to_intel_crtc_state(crtc->base.state) : NULL;
15704
15705 /* We need to check both for a crtc link (meaning that the
15706 * encoder is active and trying to read from a pipe) and the
15707 * pipe itself being active. */
15708 bool has_active_crtc = crtc_state &&
15709 crtc_state->base.active;
15710
15711 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
15712 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
15713 pipe_name(crtc->pipe));
15714 has_active_crtc = false;
15715 }
15716
15717 connector = intel_encoder_find_connector(encoder);
15718 if (connector && !has_active_crtc) {
15719 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15720 encoder->base.base.id,
15721 encoder->base.name);
15722
15723 /* Connector is active, but has no active pipe. This is
15724 * fallout from restoring the registers on resume. Disable
15725 * the encoder manually again. */
15726 if (crtc_state) {
15727 struct drm_encoder *best_encoder;
15728
15729 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15730 encoder->base.base.id,
15731 encoder->base.name);
15732
15733 /* avoid oopsing in case the hooks consult best_encoder */
15734 best_encoder = connector->base.state->best_encoder;
15735 connector->base.state->best_encoder = &encoder->base;
15736
15737 if (encoder->disable)
15738 encoder->disable(encoder, crtc_state,
15739 connector->base.state);
15740 if (encoder->post_disable)
15741 encoder->post_disable(encoder, crtc_state,
15742 connector->base.state);
15743
15744 connector->base.state->best_encoder = best_encoder;
15745 }
15746 encoder->base.crtc = NULL;
15747
15748 /* Inconsistent output/port/pipe state is presumably due to
15749 * a bug in one of the get_hw_state functions, or someplace else
15750 * in our code, like the register restore mess on resume. Clamp
15751 * things to off as a safer default. */
15752
15753 connector->base.dpms = DRM_MODE_DPMS_OFF;
15754 connector->base.encoder = NULL;
15755 }
15756
15757 /* notify opregion of the sanitized encoder state */
15758 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
15759
15760 if (INTEL_GEN(dev_priv) >= 11)
15761 icl_sanitize_encoder_pll_mapping(encoder);
15762 }
15763
15764 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15765 {
15766 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15767
15768 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15769 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15770 i915_disable_vga(dev_priv);
15771 }
15772 }
15773
15774 void i915_redisable_vga(struct drm_i915_private *dev_priv)
15775 {
15776 intel_wakeref_t wakeref;
15777
15778 /*
15779 * This function can be called both from intel_modeset_setup_hw_state and
15780 * at a very early point in our resume sequence, where the power well
15781 * structures are not yet restored. Since this function is at a very
15782 * paranoid "someone might have enabled VGA while we were not looking"
15783 * level, just check if the power well is enabled instead of trying to
15784 * follow the "don't touch the power well if we don't need it" policy
15785 * the rest of the driver uses.
15786 */
15787 wakeref = intel_display_power_get_if_enabled(dev_priv,
15788 POWER_DOMAIN_VGA);
15789 if (!wakeref)
15790 return;
15791
15792 i915_redisable_vga_power_on(dev_priv);
15793
15794 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
15795 }
15796
15797 /* FIXME read out full plane state for all planes */
15798 static void readout_plane_state(struct drm_i915_private *dev_priv)
15799 {
15800 struct intel_plane *plane;
15801 struct intel_crtc *crtc;
15802
15803 for_each_intel_plane(&dev_priv->drm, plane) {
15804 struct intel_plane_state *plane_state =
15805 to_intel_plane_state(plane->base.state);
15806 struct intel_crtc_state *crtc_state;
15807 enum pipe pipe = PIPE_A;
15808 bool visible;
15809
15810 visible = plane->get_hw_state(plane, &pipe);
15811
15812 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15813 crtc_state = to_intel_crtc_state(crtc->base.state);
15814
15815 intel_set_plane_visible(crtc_state, plane_state, visible);
15816
15817 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
15818 plane->base.base.id, plane->base.name,
15819 enableddisabled(visible), pipe_name(pipe));
15820 }
15821
15822 for_each_intel_crtc(&dev_priv->drm, crtc) {
15823 struct intel_crtc_state *crtc_state =
15824 to_intel_crtc_state(crtc->base.state);
15825
15826 fixup_active_planes(crtc_state);
15827 }
15828 }
15829
15830 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15831 {
15832 struct drm_i915_private *dev_priv = to_i915(dev);
15833 enum pipe pipe;
15834 struct intel_crtc *crtc;
15835 struct intel_encoder *encoder;
15836 struct intel_connector *connector;
15837 struct drm_connector_list_iter conn_iter;
15838 int i;
15839
15840 dev_priv->active_crtcs = 0;
15841
15842 for_each_intel_crtc(dev, crtc) {
15843 struct intel_crtc_state *crtc_state =
15844 to_intel_crtc_state(crtc->base.state);
15845
15846 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15847 memset(crtc_state, 0, sizeof(*crtc_state));
15848 crtc_state->base.crtc = &crtc->base;
15849
15850 crtc_state->base.active = crtc_state->base.enable =
15851 dev_priv->display.get_pipe_config(crtc, crtc_state);
15852
15853 crtc->base.enabled = crtc_state->base.enable;
15854 crtc->active = crtc_state->base.active;
15855
15856 if (crtc_state->base.active)
15857 dev_priv->active_crtcs |= 1 << crtc->pipe;
15858
15859 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15860 crtc->base.base.id, crtc->base.name,
15861 enableddisabled(crtc_state->base.active));
15862 }
15863
15864 readout_plane_state(dev_priv);
15865
15866 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15867 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15868
15869 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
15870 &pll->state.hw_state);
15871 pll->state.crtc_mask = 0;
15872 for_each_intel_crtc(dev, crtc) {
15873 struct intel_crtc_state *crtc_state =
15874 to_intel_crtc_state(crtc->base.state);
15875
15876 if (crtc_state->base.active &&
15877 crtc_state->shared_dpll == pll)
15878 pll->state.crtc_mask |= 1 << crtc->pipe;
15879 }
15880 pll->active_mask = pll->state.crtc_mask;
15881
15882 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15883 pll->info->name, pll->state.crtc_mask, pll->on);
15884 }
15885
15886 for_each_intel_encoder(dev, encoder) {
15887 pipe = 0;
15888
15889 if (encoder->get_hw_state(encoder, &pipe)) {
15890 struct intel_crtc_state *crtc_state;
15891
15892 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15893 crtc_state = to_intel_crtc_state(crtc->base.state);
15894
15895 encoder->base.crtc = &crtc->base;
15896 encoder->get_config(encoder, crtc_state);
15897 } else {
15898 encoder->base.crtc = NULL;
15899 }
15900
15901 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15902 encoder->base.base.id, encoder->base.name,
15903 enableddisabled(encoder->base.crtc),
15904 pipe_name(pipe));
15905 }
15906
15907 drm_connector_list_iter_begin(dev, &conn_iter);
15908 for_each_intel_connector_iter(connector, &conn_iter) {
15909 if (connector->get_hw_state(connector)) {
15910 connector->base.dpms = DRM_MODE_DPMS_ON;
15911
15912 encoder = connector->encoder;
15913 connector->base.encoder = &encoder->base;
15914
15915 if (encoder->base.crtc &&
15916 encoder->base.crtc->state->active) {
15917 /*
15918 * This has to be done during hardware readout
15919 * because anything calling .crtc_disable may
15920 * rely on the connector_mask being accurate.
15921 */
15922 encoder->base.crtc->state->connector_mask |=
15923 drm_connector_mask(&connector->base);
15924 encoder->base.crtc->state->encoder_mask |=
15925 drm_encoder_mask(&encoder->base);
15926 }
15927
15928 } else {
15929 connector->base.dpms = DRM_MODE_DPMS_OFF;
15930 connector->base.encoder = NULL;
15931 }
15932 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15933 connector->base.base.id, connector->base.name,
15934 enableddisabled(connector->base.encoder));
15935 }
15936 drm_connector_list_iter_end(&conn_iter);
15937
15938 for_each_intel_crtc(dev, crtc) {
15939 struct intel_crtc_state *crtc_state =
15940 to_intel_crtc_state(crtc->base.state);
15941 int min_cdclk = 0;
15942
15943 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15944 if (crtc_state->base.active) {
15945 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
15946 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
15947 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
15948 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
15949 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15950
15951 /*
15952 * The initial mode needs to be set in order to keep
15953 * the atomic core happy. It wants a valid mode if the
15954 * crtc's enabled, so we do the above call.
15955 *
15956 * But we don't set all the derived state fully, hence
15957 * set a flag to indicate that a full recalculation is
15958 * needed on the next commit.
15959 */
15960 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
15961
15962 intel_crtc_compute_pixel_rate(crtc_state);
15963
15964 if (dev_priv->display.modeset_calc_cdclk) {
15965 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
15966 if (WARN_ON(min_cdclk < 0))
15967 min_cdclk = 0;
15968 }
15969
15970 drm_calc_timestamping_constants(&crtc->base,
15971 &crtc_state->base.adjusted_mode);
15972 update_scanline_offset(crtc_state);
15973 }
15974
15975 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
15976 dev_priv->min_voltage_level[crtc->pipe] =
15977 crtc_state->min_voltage_level;
15978
15979 intel_pipe_config_sanity_check(dev_priv, crtc_state);
15980 }
15981 }
15982
15983 static void
15984 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15985 {
15986 struct intel_encoder *encoder;
15987
15988 for_each_intel_encoder(&dev_priv->drm, encoder) {
15989 struct intel_crtc_state *crtc_state;
15990
15991 if (!encoder->get_power_domains)
15992 continue;
15993
15994 /*
15995 * MST-primary and inactive encoders don't have a crtc state
15996 * and neither of these require any power domain references.
15997 */
15998 if (!encoder->base.crtc)
15999 continue;
16000
16001 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16002 encoder->get_power_domains(encoder, crtc_state);
16003 }
16004 }
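
/*
 * The get_power_domains() hooks called above are expected to grab the
 * power references they need themselves, and to hold them until the
 * encoder is disabled. An illustrative sketch of the shape of such a
 * hook follows; the hook name and the power domain below are examples
 * only, the real implementations live in the encoder specific code:
 *
 * static void foo_get_power_domains(struct intel_encoder *encoder,
 *                                   struct intel_crtc_state *crtc_state)
 * {
 *         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 *
 *         intel_display_power_get(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
 * }
 */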
16005
16006 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16007 {
16008 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16009 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16010 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16011 DARBF_GATING_DIS);
16012
16013 if (IS_HASWELL(dev_priv)) {
16014 /*
16015 * WaRsPkgCStateDisplayPMReq:hsw
16016 * System hang if this isn't done before disabling all planes!
16017 */
16018 I915_WRITE(CHICKEN_PAR1_1,
16019 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16020 }
16021 }
16022
16023 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16024 enum port port, i915_reg_t hdmi_reg)
16025 {
16026 u32 val = I915_READ(hdmi_reg);
16027
16028 if ((val & SDVO_ENABLE) ||
16029 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16030 return;
16031
16032 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16033 port_name(port));
16034
16035 val &= ~SDVO_PIPE_SEL_MASK;
16036 val |= SDVO_PIPE_SEL(PIPE_A);
16037
16038 I915_WRITE(hdmi_reg, val);
16039 }
16040
16041 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16042 enum port port, i915_reg_t dp_reg)
16043 {
16044 u32 val = I915_READ(dp_reg);
16045
16046 if ((val & DP_PORT_EN) ||
16047 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16048 return;
16049
16050 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16051 port_name(port));
16052
16053 val &= ~DP_PIPE_SEL_MASK;
16054 val |= DP_PIPE_SEL(PIPE_A);
16055
16056 I915_WRITE(dp_reg, val);
16057 }
16058
16059 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16060 {
16061 /*
16062 * The BIOS may select transcoder B on some of the PCH
16063 * ports even if it doesn't enable the port. This would trip
16064 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16065 * Sanitize the transcoder select bits to prevent that. We
16066 * assume that the BIOS never actually enabled the port,
16067 * because if it did we'd actually have to toggle the port
16068 * on and back off to make the transcoder A select stick
16069 * (see intel_dp_link_down(), intel_disable_hdmi(),
16070 * intel_disable_sdvo()).
16071 */
16072 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16073 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16074 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16075
16076 /* PCH SDVOB multiplex with HDMIB */
16077 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16078 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16079 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16080 }
16081
16082 /* Scan out the current hw modeset state and sanitize it,
16083 * bringing software state back in sync with the hardware.
16084 */
16085 static void
16086 intel_modeset_setup_hw_state(struct drm_device *dev,
16087 struct drm_modeset_acquire_ctx *ctx)
16088 {
16089 struct drm_i915_private *dev_priv = to_i915(dev);
16090 struct intel_crtc_state *crtc_state;
16091 struct intel_encoder *encoder;
16092 struct intel_crtc *crtc;
16093 intel_wakeref_t wakeref;
16094 int i;
16095
16096 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16097
16098 intel_early_display_was(dev_priv);
16099 intel_modeset_readout_hw_state(dev);
16100
16101 /* HW state is read out, now we need to sanitize this mess. */
16102 get_encoder_power_domains(dev_priv);
16103
16104 if (HAS_PCH_IBX(dev_priv))
16105 ibx_sanitize_pch_ports(dev_priv);
16106
16107 /*
16108 * intel_sanitize_plane_mapping() may need to do vblank
16109 * waits, so we need vblank interrupts restored beforehand.
16110 */
16111 for_each_intel_crtc(&dev_priv->drm, crtc) {
16112 crtc_state = to_intel_crtc_state(crtc->base.state);
16113
16114 drm_crtc_vblank_reset(&crtc->base);
16115
16116 if (crtc_state->base.active)
16117 intel_crtc_vblank_on(crtc_state);
16118 }
16119
16120 intel_sanitize_plane_mapping(dev_priv);
16121
16122 for_each_intel_encoder(dev, encoder)
16123 intel_sanitize_encoder(encoder);
16124
16125 for_each_intel_crtc(&dev_priv->drm, crtc) {
16126 crtc_state = to_intel_crtc_state(crtc->base.state);
16127 intel_sanitize_crtc(crtc, ctx);
16128 intel_dump_pipe_config(crtc, crtc_state,
16129 "[setup_hw_state]");
16130 }
16131
16132 intel_modeset_update_connector_atomic_state(dev);
16133
16134 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16135 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16136
16137 if (!pll->on || pll->active_mask)
16138 continue;
16139
16140 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16141 pll->info->name);
16142
16143 pll->info->funcs->disable(dev_priv, pll);
16144 pll->on = false;
16145 }
16146
16147 if (IS_G4X(dev_priv)) {
16148 g4x_wm_get_hw_state(dev_priv);
16149 g4x_wm_sanitize(dev_priv);
16150 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16151 vlv_wm_get_hw_state(dev_priv);
16152 vlv_wm_sanitize(dev_priv);
16153 } else if (INTEL_GEN(dev_priv) >= 9) {
16154 skl_wm_get_hw_state(dev_priv);
16155 } else if (HAS_PCH_SPLIT(dev_priv)) {
16156 ilk_wm_get_hw_state(dev_priv);
16157 }
16158
16159 for_each_intel_crtc(dev, crtc) {
16160 u64 put_domains;
16161
16162 crtc_state = to_intel_crtc_state(crtc->base.state);
16163 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16164 if (WARN_ON(put_domains))
16165 modeset_put_power_domains(dev_priv, put_domains);
16166 }
16167
16168 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
16169
16170 intel_fbc_init_pipe_state(dev_priv);
16171 }
16172
16173 void intel_display_resume(struct drm_device *dev)
16174 {
16175 struct drm_i915_private *dev_priv = to_i915(dev);
16176 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16177 struct drm_modeset_acquire_ctx ctx;
16178 int ret;
16179
16180 dev_priv->modeset_restore_state = NULL;
16181 if (state)
16182 state->acquire_ctx = &ctx;
16183
16184 drm_modeset_acquire_init(&ctx, 0);
16185
16186 while (1) {
16187 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16188 if (ret != -EDEADLK)
16189 break;
16190
16191 drm_modeset_backoff(&ctx);
16192 }
16193
16194 if (!ret)
16195 ret = __intel_display_resume(dev, state, &ctx);
16196
16197 intel_enable_ipc(dev_priv);
16198 drm_modeset_drop_locks(&ctx);
16199 drm_modeset_acquire_fini(&ctx);
16200
16201 if (ret)
16202 DRM_ERROR("Restoring old state failed with %i\n", ret);
16203 if (state)
16204 drm_atomic_state_put(state);
16205 }
16206
16207 static void intel_hpd_poll_fini(struct drm_device *dev)
16208 {
16209 struct intel_connector *connector;
16210 struct drm_connector_list_iter conn_iter;
16211
16212 /* Kill all the work that may have been queued by hpd. */
16213 drm_connector_list_iter_begin(dev, &conn_iter);
16214 for_each_intel_connector_iter(connector, &conn_iter) {
16215 if (connector->modeset_retry_work.func)
16216 cancel_work_sync(&connector->modeset_retry_work);
16217 if (connector->hdcp.shim) {
16218 cancel_delayed_work_sync(&connector->hdcp.check_work);
16219 cancel_work_sync(&connector->hdcp.prop_work);
16220 }
16221 }
16222 drm_connector_list_iter_end(&conn_iter);
16223 }
16224
16225 void intel_modeset_cleanup(struct drm_device *dev)
16226 {
16227 struct drm_i915_private *dev_priv = to_i915(dev);
16228
16229 flush_workqueue(dev_priv->modeset_wq);
16230
16231 flush_work(&dev_priv->atomic_helper.free_work);
16232 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16233
16234 /*
16235 * Shut down interrupts and polling first to avoid creating havoc.
16236 * Too much of the stuff here (turning off connectors, ...) would
16237 * race otherwise.
16238 */
16239 intel_irq_uninstall(dev_priv);
16240
16241 /*
16242 * Due to the hpd irq storm handling, the hotplug work can re-arm the
16243 * poll handlers. Hence disable polling after hpd handling is shut down.
16244 */
16245 intel_hpd_poll_fini(dev);
16246
16247 /* poll work can call into fbdev, hence clean that up afterwards */
16248 intel_fbdev_fini(dev_priv);
16249
16250 intel_unregister_dsm_handler();
16251
16252 intel_fbc_global_disable(dev_priv);
16253
16254 /* flush any delayed tasks or pending work */
16255 flush_scheduled_work();
16256
16257 drm_mode_config_cleanup(dev);
16258
16259 intel_overlay_cleanup(dev_priv);
16260
16261 intel_teardown_gmbus(dev_priv);
16262
16263 destroy_workqueue(dev_priv->modeset_wq);
16264
16265 intel_fbc_cleanup_cfb(dev_priv);
16266 }
16267
16268 /*
16269 * Set the VGA decode state: state == true enables VGA decode.
16270 */
16271 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16272 {
16273 unsigned int reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16274 u16 gmch_ctrl;
16275
16276 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16277 DRM_ERROR("failed to read control word\n");
16278 return -EIO;
16279 }
16280
16281 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16282 return 0;
16283
16284 if (state)
16285 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16286 else
16287 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16288
16289 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16290 DRM_ERROR("failed to write control word\n");
16291 return -EIO;
16292 }
16293
16294 return 0;
16295 }
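
/*
 * An illustrative sketch of how a vgaarb client's set_decode callback
 * might use the helper above (modelled on the i915 callback registered
 * via vga_client_register(); paraphrased here, not authoritative):
 *
 * static unsigned int example_vga_set_decode(void *cookie, bool state)
 * {
 *         struct drm_i915_private *dev_priv = cookie;
 *
 *         intel_modeset_vga_set_state(dev_priv, state);
 *         if (state)
 *                 return VGA_RGB_IO | VGA_RGB_MEM |
 *                        VGA_LEGACY_IO | VGA_LEGACY_MEM;
 *         else
 *                 return VGA_LEGACY_IO | VGA_LEGACY_MEM;
 * }
 */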
16296
16297 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16298
16299 struct intel_display_error_state {
16300
16301 u32 power_well_driver;
16302
16303 int num_transcoders;
16304
16305 struct intel_cursor_error_state {
16306 u32 control;
16307 u32 position;
16308 u32 base;
16309 u32 size;
16310 } cursor[I915_MAX_PIPES];
16311
16312 struct intel_pipe_error_state {
16313 bool power_domain_on;
16314 u32 source;
16315 u32 stat;
16316 } pipe[I915_MAX_PIPES];
16317
16318 struct intel_plane_error_state {
16319 u32 control;
16320 u32 stride;
16321 u32 size;
16322 u32 pos;
16323 u32 addr;
16324 u32 surface;
16325 u32 tile_offset;
16326 } plane[I915_MAX_PIPES];
16327
16328 struct intel_transcoder_error_state {
16329 bool power_domain_on;
16330 enum transcoder cpu_transcoder;
16331
16332 u32 conf;
16333
16334 u32 htotal;
16335 u32 hblank;
16336 u32 hsync;
16337 u32 vtotal;
16338 u32 vblank;
16339 u32 vsync;
16340 } transcoder[4];
16341 };
16342
16343 struct intel_display_error_state *
16344 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16345 {
16346 struct intel_display_error_state *error;
16347 int transcoders[] = {
16348 TRANSCODER_A,
16349 TRANSCODER_B,
16350 TRANSCODER_C,
16351 TRANSCODER_EDP,
16352 };
16353 int i;
16354
16355 if (!HAS_DISPLAY(dev_priv))
16356 return NULL;
16357
16358 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16359 if (!error)
16360 return NULL;
16361
16362 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16363 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16364
16365 for_each_pipe(dev_priv, i) {
16366 error->pipe[i].power_domain_on =
16367 __intel_display_power_is_enabled(dev_priv,
16368 POWER_DOMAIN_PIPE(i));
16369 if (!error->pipe[i].power_domain_on)
16370 continue;
16371
16372 error->cursor[i].control = I915_READ(CURCNTR(i));
16373 error->cursor[i].position = I915_READ(CURPOS(i));
16374 error->cursor[i].base = I915_READ(CURBASE(i));
16375
16376 error->plane[i].control = I915_READ(DSPCNTR(i));
16377 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16378 if (INTEL_GEN(dev_priv) <= 3) {
16379 error->plane[i].size = I915_READ(DSPSIZE(i));
16380 error->plane[i].pos = I915_READ(DSPPOS(i));
16381 }
16382 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16383 error->plane[i].addr = I915_READ(DSPADDR(i));
16384 if (INTEL_GEN(dev_priv) >= 4) {
16385 error->plane[i].surface = I915_READ(DSPSURF(i));
16386 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16387 }
16388
16389 error->pipe[i].source = I915_READ(PIPESRC(i));
16390
16391 if (HAS_GMCH(dev_priv))
16392 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16393 }
16394
16395 /* Note: this does not include DSI transcoders. */
16396 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16397 if (HAS_DDI(dev_priv))
16398 error->num_transcoders++; /* Account for eDP. */
16399
16400 for (i = 0; i < error->num_transcoders; i++) {
16401 enum transcoder cpu_transcoder = transcoders[i];
16402
16403 error->transcoder[i].power_domain_on =
16404 __intel_display_power_is_enabled(dev_priv,
16405 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16406 if (!error->transcoder[i].power_domain_on)
16407 continue;
16408
16409 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16410
16411 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16412 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16413 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16414 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16415 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16416 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16417 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16418 }
16419
16420 return error;
16421 }
16422
16423 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16424
16425 void
16426 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16427 struct intel_display_error_state *error)
16428 {
16429 struct drm_i915_private *dev_priv = m->i915;
16430 int i;
16431
16432 if (!error)
16433 return;
16434
16435 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16436 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16437 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16438 error->power_well_driver);
16439 for_each_pipe(dev_priv, i) {
16440 err_printf(m, "Pipe [%d]:\n", i);
16441 err_printf(m, " Power: %s\n",
16442 onoff(error->pipe[i].power_domain_on));
16443 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
16444 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
16445
16446 err_printf(m, "Plane [%d]:\n", i);
16447 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
16448 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
16449 if (INTEL_GEN(dev_priv) <= 3) {
16450 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
16451 err_printf(m, " POS: %08x\n", error->plane[i].pos);
16452 }
16453 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16454 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
16455 if (INTEL_GEN(dev_priv) >= 4) {
16456 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
16457 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
16458 }
16459
16460 err_printf(m, "Cursor [%d]:\n", i);
16461 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
16462 err_printf(m, " POS: %08x\n", error->cursor[i].position);
16463 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
16464 }
16465
16466 for (i = 0; i < error->num_transcoders; i++) {
16467 err_printf(m, "CPU transcoder: %s\n",
16468 transcoder_name(error->transcoder[i].cpu_transcoder));
16469 err_printf(m, " Power: %s\n",
16470 onoff(error->transcoder[i].power_domain_on));
16471 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
16472 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
16473 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
16474 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
16475 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
16476 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
16477 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
16478 }
16479 }
16480
16481 #endif