/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_color.h"
#include "intel_cdclk.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lvds.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tv.h"
#include "intel_vdsc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
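
/*
 * Sketch of how these limits are consumed (added note, see the helpers
 * below): a candidate set of divisors is accepted only if each dpll
 * field, plus the derived m, p, vco and dot values, falls within the
 * matching [min, max] pair (intel_PLL_is_valid()), while p2 is chosen
 * from p2_slow/p2_fast depending on whether the target dot clock is
 * below dot_limit (i9xx_select_p2_div()).
 */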

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}
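
/*
 * Worked example (fuse value assumed for illustration): a CCK fuse
 * field of 2 selects vco_freq[2] = 2000 MHz, so the helper returns
 * 2000 * 1000 = 2000000 kHz.
 */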

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
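
/*
 * Worked example (values assumed for illustration): with ref_freq =
 * 2000000 kHz and a divider field of 4, the result is
 * DIV_ROUND_CLOSEST(2000000 << 1, 4 + 1) = 800000 kHz, i.e. the
 * divider implements rate = 2 * ref / (div + 1).
 */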

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
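
/*
 * Example of the fast clock convention above (added for clarity): a
 * 270000 kHz pixel clock corresponds to a 270000 * 5 = 1350000 kHz
 * fast clock, which is the unit the .dot limits are expressed in.
 */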

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
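
/*
 * Worked example of the Pineview equation (divisors assumed for
 * illustration): refclk = 96000 kHz, m2 = 98, n = 5, p1 = 2, p2 = 5:
 *   m   = 98 + 2 = 100
 *   vco = 96000 * 100 / 5 = 1920000 kHz
 *   dot = 1920000 / (2 * 5) = 192000 kHz
 */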

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
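
/*
 * Worked example of the i9xx equation (divisors assumed for
 * illustration): refclk = 96000 kHz, m1 = 12, m2 = 5, n = 2,
 * p1 = 2, p2 = 5:
 *   m   = 5 * (12 + 2) + (5 + 2) = 77
 *   vco = 96000 * 77 / (2 + 2) = 1848000 kHz
 *   dot = 1848000 / (2 * 5) = 184800 kHz
 */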

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
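
/*
 * Note (added for clarity): on CHV m2 is carried in 22.22 fixed point
 * (cf. the 24 << 22 .. 175 << 22 limits above), which the n << 22
 * divisor cancels, so vco is still refclk * m1 * m2 / n in plain
 * integer units. The final / 5 converts the fast clock back to the
 * pixel (dot) clock, mirroring the target *= 5 in the finders below.
 */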

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 (1/256 + 1/512 = 3/512) */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
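
/*
 * Worked example of the ppm computation above (numbers assumed for
 * illustration): for target_freq = 1350000 kHz and a calculated dot
 * clock of 1349000 kHz the error is 1000000 * 1000 / 1350000 = 740 ppm,
 * which displaces a previous best only if 740 + 10 < best_error_ppm.
 */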

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

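					/*
					 * Added note: vlv_calc_dpll_params()
					 * gives the fast dot clock as
					 * refclk * m1 * m2 / (n * p), so the
					 * best integer m2 for the target is
					 * solved for directly rather than
					 * iterated over.
					 */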
899 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
900 refclk * clock.m1);
901
902 vlv_calc_dpll_params(refclk, &clock);
903
904 if (!intel_PLL_is_valid(to_i915(dev),
905 limit,
906 &clock))
907 continue;
908
909 if (!vlv_PLL_is_optimal(dev, target,
910 &clock,
911 best_clock,
912 bestppm, &ppm))
913 continue;
914
915 *best_clock = clock;
916 bestppm = ppm;
917 found = true;
918 }
919 }
920 }
921 }
922
923 return found;
924 }
925
926 /*
927 * Returns a set of divisors for the desired target clock with the given
928 * refclk, or FALSE. The returned values represent the clock equation:
929 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
930 */
931 static bool
932 chv_find_best_dpll(const struct intel_limit *limit,
933 struct intel_crtc_state *crtc_state,
934 int target, int refclk, struct dpll *match_clock,
935 struct dpll *best_clock)
936 {
937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
938 struct drm_device *dev = crtc->base.dev;
939 unsigned int best_error_ppm;
940 struct dpll clock;
941 u64 m2;
942 int found = false;
943
944 memset(best_clock, 0, sizeof(*best_clock));
945 best_error_ppm = 1000000;
946
947 /*
948 * Based on hardware doc, the n always set to 1, and m1 always
949 * set to 2. If requires to support 200Mhz refclk, we need to
950 * revisit this because n may not 1 anymore.
951 */
952 clock.n = 1, clock.m1 = 2;
953 target *= 5; /* fast clock */
954
955 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
956 for (clock.p2 = limit->p2.p2_fast;
957 clock.p2 >= limit->p2.p2_slow;
958 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
959 unsigned int error_ppm;
960
961 clock.p = clock.p1 * clock.p2;
962
963 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
964 refclk * clock.m1);
965
966 if (m2 > INT_MAX/clock.m1)
967 continue;
968
969 clock.m2 = m2;
970
971 chv_calc_dpll_params(refclk, &clock);
972
973 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
974 continue;
975
976 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
977 best_error_ppm, &error_ppm))
978 continue;
979
980 *best_clock = clock;
981 best_error_ppm = error_ppm;
982 found = true;
983 }
984 }
985
986 return found;
987 }
988
989 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
990 struct dpll *best_clock)
991 {
992 int refclk = 100000;
993 const struct intel_limit *limit = &intel_limits_bxt;
994
995 return chv_find_best_dpll(limit, crtc_state,
996 crtc_state->port_clock, refclk,
997 NULL, best_clock);
998 }
999
1000 bool intel_crtc_active(struct intel_crtc *crtc)
1001 {
1002 /* Be paranoid as we can arrive here with only partial
1003 * state retrieved from the hardware during setup.
1004 *
1005 * We can ditch the adjusted_mode.crtc_clock check as soon
1006 * as Haswell has gained clock readout/fastboot support.
1007 *
1008 * We can ditch the crtc->primary->state->fb check as soon as we can
1009 * properly reconstruct framebuffers.
1010 *
1011 * FIXME: The intel_crtc->active here should be switched to
1012 * crtc->state->active once we have proper CRTC states wired up
1013 * for atomic.
1014 */
1015 return crtc->active && crtc->base.primary->state->fb &&
1016 crtc->config->base.adjusted_mode.crtc_clock;
1017 }
1018
1019 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1020 enum pipe pipe)
1021 {
1022 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1023
1024 return crtc->config->cpu_transcoder;
1025 }
1026
1027 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1028 enum pipe pipe)
1029 {
1030 i915_reg_t reg = PIPEDSL(pipe);
1031 u32 line1, line2;
1032 u32 line_mask;
1033
1034 if (IS_GEN(dev_priv, 2))
1035 line_mask = DSL_LINEMASK_GEN2;
1036 else
1037 line_mask = DSL_LINEMASK_GEN3;
1038
1039 line1 = I915_READ(reg) & line_mask;
1040 msleep(5);
1041 line2 = I915_READ(reg) & line_mask;
1042
1043 return line1 != line2;
1044 }
1045
1046 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1047 {
1048 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1049 enum pipe pipe = crtc->pipe;
1050
1051 /* Wait for the display line to settle/start moving */
1052 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1053 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1054 pipe_name(pipe), onoff(state));
1055 }
1056
1057 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1058 {
1059 wait_for_pipe_scanline_moving(crtc, false);
1060 }
1061
1062 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1063 {
1064 wait_for_pipe_scanline_moving(crtc, true);
1065 }
1066
1067 static void
1068 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1069 {
1070 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1072
1073 if (INTEL_GEN(dev_priv) >= 4) {
1074 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1075 i915_reg_t reg = PIPECONF(cpu_transcoder);
1076
1077 /* Wait for the Pipe State to go off */
1078 if (intel_wait_for_register(&dev_priv->uncore,
1079 reg, I965_PIPECONF_ACTIVE, 0,
1080 100))
1081 WARN(1, "pipe_off wait timed out\n");
1082 } else {
1083 intel_wait_for_pipe_scanline_stopped(crtc);
1084 }
1085 }
1086
1087 /* Only for pre-ILK configs */
1088 void assert_pll(struct drm_i915_private *dev_priv,
1089 enum pipe pipe, bool state)
1090 {
1091 u32 val;
1092 bool cur_state;
1093
1094 val = I915_READ(DPLL(pipe));
1095 cur_state = !!(val & DPLL_VCO_ENABLE);
1096 I915_STATE_WARN(cur_state != state,
1097 "PLL state assertion failure (expected %s, current %s)\n",
1098 onoff(state), onoff(cur_state));
1099 }
1100
1101 /* XXX: the dsi pll is shared between MIPI DSI ports */
1102 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1103 {
1104 u32 val;
1105 bool cur_state;
1106
1107 vlv_cck_get(dev_priv);
1108 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1109 vlv_cck_put(dev_priv);
1110
1111 cur_state = val & DSI_PLL_VCO_EN;
1112 I915_STATE_WARN(cur_state != state,
1113 "DSI PLL state assertion failure (expected %s, current %s)\n",
1114 onoff(state), onoff(cur_state));
1115 }
1116
1117 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1118 enum pipe pipe, bool state)
1119 {
1120 bool cur_state;
1121 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1122 pipe);
1123
1124 if (HAS_DDI(dev_priv)) {
1125 /* DDI does not have a specific FDI_TX register */
1126 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1127 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1128 } else {
1129 u32 val = I915_READ(FDI_TX_CTL(pipe));
1130 cur_state = !!(val & FDI_TX_ENABLE);
1131 }
1132 I915_STATE_WARN(cur_state != state,
1133 "FDI TX state assertion failure (expected %s, current %s)\n",
1134 onoff(state), onoff(cur_state));
1135 }
1136 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1137 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1138
1139 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1140 enum pipe pipe, bool state)
1141 {
1142 u32 val;
1143 bool cur_state;
1144
1145 val = I915_READ(FDI_RX_CTL(pipe));
1146 cur_state = !!(val & FDI_RX_ENABLE);
1147 I915_STATE_WARN(cur_state != state,
1148 "FDI RX state assertion failure (expected %s, current %s)\n",
1149 onoff(state), onoff(cur_state));
1150 }
1151 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1152 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1153
1154 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1155 enum pipe pipe)
1156 {
1157 u32 val;
1158
1159 /* ILK FDI PLL is always enabled */
1160 if (IS_GEN(dev_priv, 5))
1161 return;
1162
1163 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1164 if (HAS_DDI(dev_priv))
1165 return;
1166
1167 val = I915_READ(FDI_TX_CTL(pipe));
1168 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1169 }
1170
1171 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1172 enum pipe pipe, bool state)
1173 {
1174 u32 val;
1175 bool cur_state;
1176
1177 val = I915_READ(FDI_RX_CTL(pipe));
1178 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1179 I915_STATE_WARN(cur_state != state,
1180 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1181 onoff(state), onoff(cur_state));
1182 }
1183
1184 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1185 {
1186 i915_reg_t pp_reg;
1187 u32 val;
1188 enum pipe panel_pipe = INVALID_PIPE;
1189 bool locked = true;
1190
1191 if (WARN_ON(HAS_DDI(dev_priv)))
1192 return;
1193
1194 if (HAS_PCH_SPLIT(dev_priv)) {
1195 u32 port_sel;
1196
1197 pp_reg = PP_CONTROL(0);
1198 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1199
1200 switch (port_sel) {
1201 case PANEL_PORT_SELECT_LVDS:
1202 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1203 break;
1204 case PANEL_PORT_SELECT_DPA:
1205 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1206 break;
1207 case PANEL_PORT_SELECT_DPC:
1208 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1209 break;
1210 case PANEL_PORT_SELECT_DPD:
1211 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1212 break;
1213 default:
1214 MISSING_CASE(port_sel);
1215 break;
1216 }
1217 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1218 /* presumably write lock depends on pipe, not port select */
1219 pp_reg = PP_CONTROL(pipe);
1220 panel_pipe = pipe;
1221 } else {
1222 u32 port_sel;
1223
1224 pp_reg = PP_CONTROL(0);
1225 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1226
1227 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1228 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1229 }
1230
1231 val = I915_READ(pp_reg);
1232 if (!(val & PANEL_POWER_ON) ||
1233 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1234 locked = false;
1235
1236 I915_STATE_WARN(panel_pipe == pipe && locked,
1237 "panel assertion failure, pipe %c regs locked\n",
1238 pipe_name(pipe));
1239 }
1240
1241 void assert_pipe(struct drm_i915_private *dev_priv,
1242 enum pipe pipe, bool state)
1243 {
1244 bool cur_state;
1245 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1246 pipe);
1247 enum intel_display_power_domain power_domain;
1248 intel_wakeref_t wakeref;
1249
1250 /* we keep both pipes enabled on 830 */
1251 if (IS_I830(dev_priv))
1252 state = true;
1253
1254 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1255 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1256 if (wakeref) {
1257 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1258 cur_state = !!(val & PIPECONF_ENABLE);
1259
1260 intel_display_power_put(dev_priv, power_domain, wakeref);
1261 } else {
1262 cur_state = false;
1263 }
1264
1265 I915_STATE_WARN(cur_state != state,
1266 "pipe %c assertion failure (expected %s, current %s)\n",
1267 pipe_name(pipe), onoff(state), onoff(cur_state));
1268 }
1269
1270 static void assert_plane(struct intel_plane *plane, bool state)
1271 {
1272 enum pipe pipe;
1273 bool cur_state;
1274
1275 cur_state = plane->get_hw_state(plane, &pipe);
1276
1277 I915_STATE_WARN(cur_state != state,
1278 "%s assertion failure (expected %s, current %s)\n",
1279 plane->base.name, onoff(state), onoff(cur_state));
1280 }
1281
1282 #define assert_plane_enabled(p) assert_plane(p, true)
1283 #define assert_plane_disabled(p) assert_plane(p, false)
1284
1285 static void assert_planes_disabled(struct intel_crtc *crtc)
1286 {
1287 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1288 struct intel_plane *plane;
1289
1290 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1291 assert_plane_disabled(plane);
1292 }
1293
1294 static void assert_vblank_disabled(struct drm_crtc *crtc)
1295 {
1296 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1297 drm_crtc_vblank_put(crtc);
1298 }
1299
1300 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1301 enum pipe pipe)
1302 {
1303 u32 val;
1304 bool enabled;
1305
1306 val = I915_READ(PCH_TRANSCONF(pipe));
1307 enabled = !!(val & TRANS_ENABLE);
1308 I915_STATE_WARN(enabled,
1309 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1310 pipe_name(pipe));
1311 }
1312
1313 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1314 enum pipe pipe, enum port port,
1315 i915_reg_t dp_reg)
1316 {
1317 enum pipe port_pipe;
1318 bool state;
1319
1320 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1321
1322 I915_STATE_WARN(state && port_pipe == pipe,
1323 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1324 port_name(port), pipe_name(pipe));
1325
1326 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1327 "IBX PCH DP %c still using transcoder B\n",
1328 port_name(port));
1329 }
1330
1331 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1332 enum pipe pipe, enum port port,
1333 i915_reg_t hdmi_reg)
1334 {
1335 enum pipe port_pipe;
1336 bool state;
1337
1338 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1339
1340 I915_STATE_WARN(state && port_pipe == pipe,
1341 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1342 port_name(port), pipe_name(pipe));
1343
1344 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1345 "IBX PCH HDMI %c still using transcoder B\n",
1346 port_name(port));
1347 }
1348
1349 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1350 enum pipe pipe)
1351 {
1352 enum pipe port_pipe;
1353
1354 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1355 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1356 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1357
1358 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1359 port_pipe == pipe,
1360 "PCH VGA enabled on transcoder %c, should be disabled\n",
1361 pipe_name(pipe));
1362
1363 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1364 port_pipe == pipe,
1365 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1366 pipe_name(pipe));
1367
1368 /* PCH SDVOB multiplex with HDMIB */
1369 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1370 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1371 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
1372 }
1373
1374 static void _vlv_enable_pll(struct intel_crtc *crtc,
1375 const struct intel_crtc_state *pipe_config)
1376 {
1377 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1378 enum pipe pipe = crtc->pipe;
1379
1380 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1381 POSTING_READ(DPLL(pipe));
1382 udelay(150);
1383
1384 if (intel_wait_for_register(&dev_priv->uncore,
1385 DPLL(pipe),
1386 DPLL_LOCK_VLV,
1387 DPLL_LOCK_VLV,
1388 1))
1389 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1390 }
1391
1392 static void vlv_enable_pll(struct intel_crtc *crtc,
1393 const struct intel_crtc_state *pipe_config)
1394 {
1395 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1396 enum pipe pipe = crtc->pipe;
1397
1398 assert_pipe_disabled(dev_priv, pipe);
1399
1400 /* PLL is protected by panel, make sure we can write it */
1401 assert_panel_unlocked(dev_priv, pipe);
1402
1403 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1404 _vlv_enable_pll(crtc, pipe_config);
1405
1406 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1407 POSTING_READ(DPLL_MD(pipe));
1408 }
1409
1410
1411 static void _chv_enable_pll(struct intel_crtc *crtc,
1412 const struct intel_crtc_state *pipe_config)
1413 {
1414 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1415 enum pipe pipe = crtc->pipe;
1416 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1417 u32 tmp;
1418
1419 vlv_dpio_get(dev_priv);
1420
1421 /* Enable back the 10bit clock to display controller */
1422 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1423 tmp |= DPIO_DCLKP_EN;
1424 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1425
1426 vlv_dpio_put(dev_priv);
1427
1428 /*
1429 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1430 */
1431 udelay(1);
1432
1433 /* Enable PLL */
1434 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1435
1436 /* Check PLL is locked */
1437 if (intel_wait_for_register(&dev_priv->uncore,
1438 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1439 1))
1440 DRM_ERROR("PLL %d failed to lock\n", pipe);
1441 }
1442
1443 static void chv_enable_pll(struct intel_crtc *crtc,
1444 const struct intel_crtc_state *pipe_config)
1445 {
1446 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1447 enum pipe pipe = crtc->pipe;
1448
1449 assert_pipe_disabled(dev_priv, pipe);
1450
1451 /* PLL is protected by panel, make sure we can write it */
1452 assert_panel_unlocked(dev_priv, pipe);
1453
1454 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1455 _chv_enable_pll(crtc, pipe_config);
1456
1457 if (pipe != PIPE_A) {
1458 /*
1459 * WaPixelRepeatModeFixForC0:chv
1460 *
1461 * DPLLCMD is AWOL. Use chicken bits to propagate
1462 * the value from DPLLBMD to either pipe B or C.
1463 */
1464 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1465 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1466 I915_WRITE(CBR4_VLV, 0);
1467 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1468
1469 /*
1470 * DPLLB VGA mode also seems to cause problems.
1471 * We should always have it disabled.
1472 */
1473 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1474 } else {
1475 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1476 POSTING_READ(DPLL_MD(pipe));
1477 }
1478 }
1479
1480 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1481 {
1482 if (IS_I830(dev_priv))
1483 return false;
1484
1485 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1486 }
1487
1488 static void i9xx_enable_pll(struct intel_crtc *crtc,
1489 const struct intel_crtc_state *crtc_state)
1490 {
1491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1492 i915_reg_t reg = DPLL(crtc->pipe);
1493 u32 dpll = crtc_state->dpll_hw_state.dpll;
1494 int i;
1495
1496 assert_pipe_disabled(dev_priv, crtc->pipe);
1497
1498 /* PLL is protected by panel, make sure we can write it */
1499 if (i9xx_has_pps(dev_priv))
1500 assert_panel_unlocked(dev_priv, crtc->pipe);
1501
1502 /*
1503 * Apparently we need to have VGA mode enabled prior to changing
1504 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1505 * dividers, even though the register value does change.
1506 */
1507 I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
1508 I915_WRITE(reg, dpll);
1509
1510 /* Wait for the clocks to stabilize. */
1511 POSTING_READ(reg);
1512 udelay(150);
1513
1514 if (INTEL_GEN(dev_priv) >= 4) {
1515 I915_WRITE(DPLL_MD(crtc->pipe),
1516 crtc_state->dpll_hw_state.dpll_md);
1517 } else {
1518 /* The pixel multiplier can only be updated once the
1519 * DPLL is enabled and the clocks are stable.
1520 *
1521 * So write it again.
1522 */
1523 I915_WRITE(reg, dpll);
1524 }
1525
1526 /* We do this three times for luck */
1527 for (i = 0; i < 3; i++) {
1528 I915_WRITE(reg, dpll);
1529 POSTING_READ(reg);
1530 udelay(150); /* wait for warmup */
1531 }
1532 }
1533
1534 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1535 {
1536 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1537 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1538 enum pipe pipe = crtc->pipe;
1539
1540 /* Don't disable pipe or pipe PLLs if needed */
1541 if (IS_I830(dev_priv))
1542 return;
1543
1544 /* Make sure the pipe isn't still relying on us */
1545 assert_pipe_disabled(dev_priv, pipe);
1546
1547 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1548 POSTING_READ(DPLL(pipe));
1549 }
1550
1551 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1552 {
1553 u32 val;
1554
1555 /* Make sure the pipe isn't still relying on us */
1556 assert_pipe_disabled(dev_priv, pipe);
1557
1558 val = DPLL_INTEGRATED_REF_CLK_VLV |
1559 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1560 if (pipe != PIPE_A)
1561 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1562
1563 I915_WRITE(DPLL(pipe), val);
1564 POSTING_READ(DPLL(pipe));
1565 }
1566
1567 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1568 {
1569 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1570 u32 val;
1571
1572 /* Make sure the pipe isn't still relying on us */
1573 assert_pipe_disabled(dev_priv, pipe);
1574
1575 val = DPLL_SSC_REF_CLK_CHV |
1576 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1577 if (pipe != PIPE_A)
1578 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1579
1580 I915_WRITE(DPLL(pipe), val);
1581 POSTING_READ(DPLL(pipe));
1582
1583 vlv_dpio_get(dev_priv);
1584
1585 /* Disable 10bit clock to display controller */
1586 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1587 val &= ~DPIO_DCLKP_EN;
1588 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1589
1590 vlv_dpio_put(dev_priv);
1591 }
1592
1593 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1594 struct intel_digital_port *dport,
1595 unsigned int expected_mask)
1596 {
1597 u32 port_mask;
1598 i915_reg_t dpll_reg;
1599
1600 switch (dport->base.port) {
1601 case PORT_B:
1602 port_mask = DPLL_PORTB_READY_MASK;
1603 dpll_reg = DPLL(0);
1604 break;
1605 case PORT_C:
1606 port_mask = DPLL_PORTC_READY_MASK;
1607 dpll_reg = DPLL(0);
1608 expected_mask <<= 4;
1609 break;
1610 case PORT_D:
1611 port_mask = DPLL_PORTD_READY_MASK;
1612 dpll_reg = DPIO_PHY_STATUS;
1613 break;
1614 default:
1615 BUG();
1616 }
1617
1618 if (intel_wait_for_register(&dev_priv->uncore,
1619 dpll_reg, port_mask, expected_mask,
1620 1000))
1621 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1622 port_name(dport->base.port),
1623 I915_READ(dpll_reg) & port_mask, expected_mask);
1624 }
1625
1626 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1627 {
1628 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1629 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1630 enum pipe pipe = crtc->pipe;
1631 i915_reg_t reg;
1632 u32 val, pipeconf_val;
1633
1634 /* Make sure PCH DPLL is enabled */
1635 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1636
1637 /* FDI must be feeding us bits for PCH ports */
1638 assert_fdi_tx_enabled(dev_priv, pipe);
1639 assert_fdi_rx_enabled(dev_priv, pipe);
1640
1641 if (HAS_PCH_CPT(dev_priv)) {
1642 /* Workaround: Set the timing override bit before enabling the
1643 * pch transcoder. */
1644 reg = TRANS_CHICKEN2(pipe);
1645 val = I915_READ(reg);
1646 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1647 I915_WRITE(reg, val);
1648 }
1649
1650 reg = PCH_TRANSCONF(pipe);
1651 val = I915_READ(reg);
1652 pipeconf_val = I915_READ(PIPECONF(pipe));
1653
1654 if (HAS_PCH_IBX(dev_priv)) {
1655 /*
1656 * Make the BPC in transcoder be consistent with
1657 * that in pipeconf reg. For HDMI we must use 8bpc
1658 * here for both 8bpc and 12bpc.
1659 */
1660 val &= ~PIPECONF_BPC_MASK;
1661 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1662 val |= PIPECONF_8BPC;
1663 else
1664 val |= pipeconf_val & PIPECONF_BPC_MASK;
1665 }
1666
1667 val &= ~TRANS_INTERLACE_MASK;
1668 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1669 if (HAS_PCH_IBX(dev_priv) &&
1670 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1671 val |= TRANS_LEGACY_INTERLACED_ILK;
1672 else
1673 val |= TRANS_INTERLACED;
1674 } else {
1675 val |= TRANS_PROGRESSIVE;
1676 }
1677
1678 I915_WRITE(reg, val | TRANS_ENABLE);
1679 if (intel_wait_for_register(&dev_priv->uncore,
1680 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1681 100))
1682 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1683 }
1684
1685 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1686 enum transcoder cpu_transcoder)
1687 {
1688 u32 val, pipeconf_val;
1689
1690 /* FDI must be feeding us bits for PCH ports */
1691 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1692 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1693
1694 /* Workaround: set timing override bit. */
1695 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1696 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1697 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1698
1699 val = TRANS_ENABLE;
1700 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1701
1702 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1703 PIPECONF_INTERLACED_ILK)
1704 val |= TRANS_INTERLACED;
1705 else
1706 val |= TRANS_PROGRESSIVE;
1707
1708 I915_WRITE(LPT_TRANSCONF, val);
1709 if (intel_wait_for_register(&dev_priv->uncore,
1710 LPT_TRANSCONF,
1711 TRANS_STATE_ENABLE,
1712 TRANS_STATE_ENABLE,
1713 100))
1714 DRM_ERROR("Failed to enable PCH transcoder\n");
1715 }
1716
1717 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1718 enum pipe pipe)
1719 {
1720 i915_reg_t reg;
1721 u32 val;
1722
1723 /* FDI relies on the transcoder */
1724 assert_fdi_tx_disabled(dev_priv, pipe);
1725 assert_fdi_rx_disabled(dev_priv, pipe);
1726
1727 /* Ports must be off as well */
1728 assert_pch_ports_disabled(dev_priv, pipe);
1729
1730 reg = PCH_TRANSCONF(pipe);
1731 val = I915_READ(reg);
1732 val &= ~TRANS_ENABLE;
1733 I915_WRITE(reg, val);
1734 /* wait for PCH transcoder off, transcoder state */
1735 if (intel_wait_for_register(&dev_priv->uncore,
1736 reg, TRANS_STATE_ENABLE, 0,
1737 50))
1738 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1739
1740 if (HAS_PCH_CPT(dev_priv)) {
1741 /* Workaround: Clear the timing override chicken bit again. */
1742 reg = TRANS_CHICKEN2(pipe);
1743 val = I915_READ(reg);
1744 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1745 I915_WRITE(reg, val);
1746 }
1747 }
1748
1749 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1750 {
1751 u32 val;
1752
1753 val = I915_READ(LPT_TRANSCONF);
1754 val &= ~TRANS_ENABLE;
1755 I915_WRITE(LPT_TRANSCONF, val);
1756 /* wait for PCH transcoder off, transcoder state */
1757 if (intel_wait_for_register(&dev_priv->uncore,
1758 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1759 50))
1760 DRM_ERROR("Failed to disable PCH transcoder\n");
1761
1762 /* Workaround: clear timing override bit. */
1763 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1764 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1765 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1766 }
1767
1768 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1769 {
1770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1771
1772 if (HAS_PCH_LPT(dev_priv))
1773 return PIPE_A;
1774 else
1775 return crtc->pipe;
1776 }
1777
1778 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1779 {
1780 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1781
1782 /*
1783 * On i965gm the hardware frame counter reads
1784 * zero when the TV encoder is enabled :(
1785 */
1786 if (IS_I965GM(dev_priv) &&
1787 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1788 return 0;
1789
1790 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1791 return 0xffffffff; /* full 32 bit counter */
1792 else if (INTEL_GEN(dev_priv) >= 3)
1793 return 0xffffff; /* only 24 bits of frame count */
1794 else
1795 return 0; /* Gen2 doesn't have a hardware frame counter */
1796 }
1797
1798 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1799 {
1800 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1801
1802 drm_crtc_set_max_vblank_count(&crtc->base,
1803 intel_crtc_max_vblank_count(crtc_state));
1804 drm_crtc_vblank_on(&crtc->base);
1805 }
1806
1807 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1808 {
1809 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1811 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1812 enum pipe pipe = crtc->pipe;
1813 i915_reg_t reg;
1814 u32 val;
1815
1816 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1817
1818 assert_planes_disabled(crtc);
1819
1820 /*
1821 * A pipe without a PLL won't actually be able to drive bits from
1822 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1823 * need the check.
1824 */
1825 if (HAS_GMCH(dev_priv)) {
1826 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1827 assert_dsi_pll_enabled(dev_priv);
1828 else
1829 assert_pll_enabled(dev_priv, pipe);
1830 } else {
1831 if (new_crtc_state->has_pch_encoder) {
1832 /* if driving the PCH, we need FDI enabled */
1833 assert_fdi_rx_pll_enabled(dev_priv,
1834 intel_crtc_pch_transcoder(crtc));
1835 assert_fdi_tx_pll_enabled(dev_priv,
1836 (enum pipe) cpu_transcoder);
1837 }
1838 /* FIXME: assert CPU port conditions for SNB+ */
1839 }
1840
1841 trace_intel_pipe_enable(dev_priv, pipe);
1842
1843 reg = PIPECONF(cpu_transcoder);
1844 val = I915_READ(reg);
1845 if (val & PIPECONF_ENABLE) {
1846 /* we keep both pipes enabled on 830 */
1847 WARN_ON(!IS_I830(dev_priv));
1848 return;
1849 }
1850
1851 I915_WRITE(reg, val | PIPECONF_ENABLE);
1852 POSTING_READ(reg);
1853
1854 /*
1855 * Until the pipe starts, PIPEDSL reads will return a stale value,
1856 * which causes an apparent vblank timestamp jump when PIPEDSL
1857 * resets to its proper value. That also messes up the frame count
1858 * when it's derived from the timestamps. So let's wait for the
1859 * pipe to start properly before we call drm_crtc_vblank_on()
1860 */
1861 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1862 intel_wait_for_pipe_scanline_moving(crtc);
1863 }
1864
1865 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1866 {
1867 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1869 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1870 enum pipe pipe = crtc->pipe;
1871 i915_reg_t reg;
1872 u32 val;
1873
1874 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1875
1876 /*
1877 * Make sure planes won't keep trying to pump pixels to us,
1878 * or we might hang the display.
1879 */
1880 assert_planes_disabled(crtc);
1881
1882 trace_intel_pipe_disable(dev_priv, pipe);
1883
1884 reg = PIPECONF(cpu_transcoder);
1885 val = I915_READ(reg);
1886 if ((val & PIPECONF_ENABLE) == 0)
1887 return;
1888
1889 /*
1890 * Double wide has implications for planes
1891 * so best keep it disabled when not needed.
1892 */
1893 if (old_crtc_state->double_wide)
1894 val &= ~PIPECONF_DOUBLE_WIDE;
1895
1896 /* Don't disable the pipe if it's still needed (830 keeps both pipes enabled) */
1897 if (!IS_I830(dev_priv))
1898 val &= ~PIPECONF_ENABLE;
1899
1900 I915_WRITE(reg, val);
1901 if ((val & PIPECONF_ENABLE) == 0)
1902 intel_wait_for_pipe_off(old_crtc_state);
1903 }
1904
1905 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1906 {
1907 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1908 }
1909
1910 static unsigned int
1911 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1912 {
1913 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1914 unsigned int cpp = fb->format->cpp[color_plane];
1915
1916 switch (fb->modifier) {
1917 case DRM_FORMAT_MOD_LINEAR:
1918 return intel_tile_size(dev_priv);
1919 case I915_FORMAT_MOD_X_TILED:
1920 if (IS_GEN(dev_priv, 2))
1921 return 128;
1922 else
1923 return 512;
1924 case I915_FORMAT_MOD_Y_TILED_CCS:
1925 if (color_plane == 1)
1926 return 128;
1927 /* fall through */
1928 case I915_FORMAT_MOD_Y_TILED:
1929 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1930 return 128;
1931 else
1932 return 512;
1933 case I915_FORMAT_MOD_Yf_TILED_CCS:
1934 if (color_plane == 1)
1935 return 128;
1936 /* fall through */
1937 case I915_FORMAT_MOD_Yf_TILED:
1938 switch (cpp) {
1939 case 1:
1940 return 64;
1941 case 2:
1942 case 4:
1943 return 128;
1944 case 8:
1945 case 16:
1946 return 256;
1947 default:
1948 MISSING_CASE(cpp);
1949 return cpp;
1950 }
1951 break;
1952 default:
1953 MISSING_CASE(fb->modifier);
1954 return cpp;
1955 }
1956 }
1957
1958 static unsigned int
1959 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1960 {
1961 return intel_tile_size(to_i915(fb->dev)) /
1962 intel_tile_width_bytes(fb, color_plane);
1963 }
1964
1965 /* Return the tile dimensions in pixel units */
1966 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1967 unsigned int *tile_width,
1968 unsigned int *tile_height)
1969 {
1970 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1971 unsigned int cpp = fb->format->cpp[color_plane];
1972
1973 *tile_width = tile_width_bytes / cpp;
1974 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1975 }
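
/*
 * Example: on gen4+ an X-tile is 512 bytes wide with a 4096 byte tile
 * size, so for a 4 bpp format the above yields a tile of
 * 512 / 4 = 128 pixels by 4096 / 512 = 8 rows.
 */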
1976
1977 unsigned int
1978 intel_fb_align_height(const struct drm_framebuffer *fb,
1979 int color_plane, unsigned int height)
1980 {
1981 unsigned int tile_height = intel_tile_height(fb, color_plane);
1982
1983 return ALIGN(height, tile_height);
1984 }
1985
1986 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1987 {
1988 unsigned int size = 0;
1989 int i;
1990
1991 for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
1992 size += rot_info->plane[i].width * rot_info->plane[i].height;
1993
1994 return size;
1995 }
1996
1997 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1998 {
1999 unsigned int size = 0;
2000 int i;
2001
2002 for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
2003 size += rem_info->plane[i].width * rem_info->plane[i].height;
2004
2005 return size;
2006 }
2007
2008 static void
2009 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2010 const struct drm_framebuffer *fb,
2011 unsigned int rotation)
2012 {
2013 view->type = I915_GGTT_VIEW_NORMAL;
2014 if (drm_rotation_90_or_270(rotation)) {
2015 view->type = I915_GGTT_VIEW_ROTATED;
2016 view->rotated = to_intel_framebuffer(fb)->rot_info;
2017 }
2018 }
2019
2020 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2021 {
2022 if (IS_I830(dev_priv))
2023 return 16 * 1024;
2024 else if (IS_I85X(dev_priv))
2025 return 256;
2026 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2027 return 32;
2028 else
2029 return 4 * 1024;
2030 }
2031
2032 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2033 {
2034 if (INTEL_GEN(dev_priv) >= 9)
2035 return 256 * 1024;
2036 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2037 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2038 return 128 * 1024;
2039 else if (INTEL_GEN(dev_priv) >= 4)
2040 return 4 * 1024;
2041 else
2042 return 0;
2043 }
2044
2045 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2046 int color_plane)
2047 {
2048 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2049
2050 /* AUX_DIST needs only 4K alignment */
2051 if (color_plane == 1)
2052 return 4096;
2053
2054 switch (fb->modifier) {
2055 case DRM_FORMAT_MOD_LINEAR:
2056 return intel_linear_alignment(dev_priv);
2057 case I915_FORMAT_MOD_X_TILED:
2058 if (INTEL_GEN(dev_priv) >= 9)
2059 return 256 * 1024;
2060 return 0;
2061 case I915_FORMAT_MOD_Y_TILED_CCS:
2062 case I915_FORMAT_MOD_Yf_TILED_CCS:
2063 case I915_FORMAT_MOD_Y_TILED:
2064 case I915_FORMAT_MOD_Yf_TILED:
2065 return 1 * 1024 * 1024;
2066 default:
2067 MISSING_CASE(fb->modifier);
2068 return 0;
2069 }
2070 }
2071
2072 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073 {
2074 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
2077 return INTEL_GEN(dev_priv) < 4 ||
2078 (plane->has_fbc &&
2079 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2080 }
2081
2082 struct i915_vma *
2083 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2084 const struct i915_ggtt_view *view,
2085 bool uses_fence,
2086 unsigned long *out_flags)
2087 {
2088 struct drm_device *dev = fb->dev;
2089 struct drm_i915_private *dev_priv = to_i915(dev);
2090 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2091 intel_wakeref_t wakeref;
2092 struct i915_vma *vma;
2093 unsigned int pinctl;
2094 u32 alignment;
2095
2096 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2097
2098 alignment = intel_surf_alignment(fb, 0);
2099
2100 /* Note that the w/a also requires 64 PTE of padding following the
2101 * bo. We currently fill all unused PTE with the shadow page and so
2102 * we should always have valid PTE following the scanout preventing
2103 * the VT-d warning.
2104 */
2105 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2106 alignment = 256 * 1024;
2107
2108 /*
2109 * Global gtt pte registers are special registers which actually forward
2110 * writes to a chunk of system memory, which means that there is no risk
2111 * that the register values disappear as soon as we call
2112 * intel_runtime_pm_put(), so it is correct to wrap only the
2113 * pin/unpin/fence and not more.
2114 */
2115 wakeref = intel_runtime_pm_get(dev_priv);
2116 i915_gem_object_lock(obj);
2117
2118 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2119
2120 pinctl = 0;
2121
2122 /* Valleyview is definitely limited to scanning out the first
2123 * 512MiB. Let's presume this behaviour was inherited from the
2124 * g4x display engine and that all earlier gen are similarly
2125 * limited. Testing suggests that it is a little more
2126 * complicated than this. For example, Cherryview appears quite
2127 * happy to scan out from anywhere within its global aperture.
2128 */
2129 if (HAS_GMCH(dev_priv))
2130 pinctl |= PIN_MAPPABLE;
2131
2132 vma = i915_gem_object_pin_to_display_plane(obj,
2133 alignment, view, pinctl);
2134 if (IS_ERR(vma))
2135 goto err;
2136
2137 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2138 int ret;
2139
2140 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2141 * fence, whereas 965+ only requires a fence if using
2142 * framebuffer compression. For simplicity, we always, when
2143 * possible, install a fence as the cost is not that onerous.
2144 *
2145 * If we fail to fence the tiled scanout, then either the
2146 * modeset will reject the change (which is highly unlikely as
2147 * the affected systems, all but one, do not have unmappable
2148 * space) or we will not be able to enable full powersaving
2149 * techniques (also likely not to apply due to various limits
2150 * FBC and the like impose on the size of the buffer, which
2151 * presumably we violated anyway with this unmappable buffer).
2152 * Anyway, it is presumably better to stumble onwards with
2153 * something and try to run the system in a "less than optimal"
2154 * mode that matches the user configuration.
2155 */
2156 ret = i915_vma_pin_fence(vma);
2157 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2158 i915_gem_object_unpin_from_display_plane(vma);
2159 vma = ERR_PTR(ret);
2160 goto err;
2161 }
2162
2163 if (ret == 0 && vma->fence)
2164 *out_flags |= PLANE_HAS_FENCE;
2165 }
2166
2167 i915_vma_get(vma);
2168 err:
2169 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2170
2171 i915_gem_object_unlock(obj);
2172 intel_runtime_pm_put(dev_priv, wakeref);
2173 return vma;
2174 }
2175
2176 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2177 {
2178 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2179
2180 i915_gem_object_lock(vma->obj);
2181 if (flags & PLANE_HAS_FENCE)
2182 i915_vma_unpin_fence(vma);
2183 i915_gem_object_unpin_from_display_plane(vma);
2184 i915_gem_object_unlock(vma->obj);
2185
2186 i915_vma_put(vma);
2187 }
2188
2189 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2190 unsigned int rotation)
2191 {
2192 if (drm_rotation_90_or_270(rotation))
2193 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2194 else
2195 return fb->pitches[color_plane];
2196 }
2197
2198 /*
2199 * Convert the x/y offsets into a linear offset.
2200 * Only valid with 0/180 degree rotation, which is fine since linear
2201 * offset is only used with linear buffers on pre-hsw and tiled buffers
2202 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2203 */
2204 u32 intel_fb_xy_to_linear(int x, int y,
2205 const struct intel_plane_state *state,
2206 int color_plane)
2207 {
2208 const struct drm_framebuffer *fb = state->base.fb;
2209 unsigned int cpp = fb->format->cpp[color_plane];
2210 unsigned int pitch = state->color_plane[color_plane].stride;
2211
2212 return y * pitch + x * cpp;
2213 }
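
/*
 * Example: with a 5120 byte stride and 4 bpp, (x, y) = (100, 10)
 * yields 10 * 5120 + 100 * 4 = 51600 bytes.
 */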
2214
2215 /*
2216 * Add the x/y offsets derived from fb->offsets[] to the user
2217 * specified plane src x/y offsets. The resulting x/y offsets
2218 * specify the start of scanout from the beginning of the gtt mapping.
2219 */
2220 void intel_add_fb_offsets(int *x, int *y,
2221 const struct intel_plane_state *state,
2222 int color_plane)
2224 {
2225 *x += state->color_plane[color_plane].x;
2226 *y += state->color_plane[color_plane].y;
2227 }
2228
2229 static u32 intel_adjust_tile_offset(int *x, int *y,
2230 unsigned int tile_width,
2231 unsigned int tile_height,
2232 unsigned int tile_size,
2233 unsigned int pitch_tiles,
2234 u32 old_offset,
2235 u32 new_offset)
2236 {
2237 unsigned int pitch_pixels = pitch_tiles * tile_width;
2238 unsigned int tiles;
2239
2240 WARN_ON(old_offset & (tile_size - 1));
2241 WARN_ON(new_offset & (tile_size - 1));
2242 WARN_ON(new_offset > old_offset);
2243
2244 tiles = (old_offset - new_offset) / tile_size;
2245
2246 *y += tiles / pitch_tiles * tile_height;
2247 *x += tiles % pitch_tiles * tile_width;
2248
2249 /* minimize x in case it got needlessly big */
2250 *y += *x / pitch_pixels * tile_height;
2251 *x %= pitch_pixels;
2252
2253 return new_offset;
2254 }
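
/*
 * Example: with 128x8 pixel tiles (gen4+ X-tiling at 4 bpp), a 4096
 * byte tile size and pitch_tiles = 10, rebasing from old_offset = 8192
 * to new_offset = 0 folds 8192 / 4096 = 2 tiles into the offsets:
 * x += 2 % 10 * 128 = 256, y += 2 / 10 * 8 = 0.
 */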
2255
2256 static bool is_surface_linear(u64 modifier, int color_plane)
2257 {
2258 return modifier == DRM_FORMAT_MOD_LINEAR;
2259 }
2260
2261 static u32 intel_adjust_aligned_offset(int *x, int *y,
2262 const struct drm_framebuffer *fb,
2263 int color_plane,
2264 unsigned int rotation,
2265 unsigned int pitch,
2266 u32 old_offset, u32 new_offset)
2267 {
2268 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2269 unsigned int cpp = fb->format->cpp[color_plane];
2270
2271 WARN_ON(new_offset > old_offset);
2272
2273 if (!is_surface_linear(fb->modifier, color_plane)) {
2274 unsigned int tile_size, tile_width, tile_height;
2275 unsigned int pitch_tiles;
2276
2277 tile_size = intel_tile_size(dev_priv);
2278 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2279
2280 if (drm_rotation_90_or_270(rotation)) {
2281 pitch_tiles = pitch / tile_height;
2282 swap(tile_width, tile_height);
2283 } else {
2284 pitch_tiles = pitch / (tile_width * cpp);
2285 }
2286
2287 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2288 tile_size, pitch_tiles,
2289 old_offset, new_offset);
2290 } else {
2291 old_offset += *y * pitch + *x * cpp;
2292
2293 *y = (old_offset - new_offset) / pitch;
2294 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2295 }
2296
2297 return new_offset;
2298 }
2299
2300 /*
2301 * Adjust the tile offset by moving the difference into
2302 * the x/y offsets.
2303 */
2304 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2305 const struct intel_plane_state *state,
2306 int color_plane,
2307 u32 old_offset, u32 new_offset)
2308 {
2309 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2310 state->base.rotation,
2311 state->color_plane[color_plane].stride,
2312 old_offset, new_offset);
2313 }
2314
2315 /*
2316 * Computes the aligned offset to the base tile and adjusts
2317 * x, y. bytes per pixel is assumed to be a power-of-two.
2318 *
2319 * In the 90/270 rotated case, x and y are assumed
2320 * to be already rotated to match the rotated GTT view, and
2321 * pitch is the tile_height aligned framebuffer height.
2322 *
2323 * This function is used when computing the derived information
2324 * under intel_framebuffer, so using any of that information
2325 * here is not allowed. Anything under drm_framebuffer can be
2326 * used. This is why the user has to pass in the pitch since it
2327 * is specified in the rotated orientation.
2328 */
2329 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2330 int *x, int *y,
2331 const struct drm_framebuffer *fb,
2332 int color_plane,
2333 unsigned int pitch,
2334 unsigned int rotation,
2335 u32 alignment)
2336 {
2337 unsigned int cpp = fb->format->cpp[color_plane];
2338 u32 offset, offset_aligned;
2339
2340 if (alignment)
2341 alignment--;
2342
2343 if (!is_surface_linear(fb->modifier, color_plane)) {
2344 unsigned int tile_size, tile_width, tile_height;
2345 unsigned int tile_rows, tiles, pitch_tiles;
2346
2347 tile_size = intel_tile_size(dev_priv);
2348 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2349
2350 if (drm_rotation_90_or_270(rotation)) {
2351 pitch_tiles = pitch / tile_height;
2352 swap(tile_width, tile_height);
2353 } else {
2354 pitch_tiles = pitch / (tile_width * cpp);
2355 }
2356
2357 tile_rows = *y / tile_height;
2358 *y %= tile_height;
2359
2360 tiles = *x / tile_width;
2361 *x %= tile_width;
2362
2363 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2364 offset_aligned = offset & ~alignment;
2365
2366 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2367 tile_size, pitch_tiles,
2368 offset, offset_aligned);
2369 } else {
2370 offset = *y * pitch + *x * cpp;
2371 offset_aligned = offset & ~alignment;
2372
2373 *y = (offset & alignment) / pitch;
2374 *x = ((offset & alignment) - *y * pitch) / cpp;
2375 }
2376
2377 return offset_aligned;
2378 }
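
/*
 * Example for the linear path: pitch = 5120, cpp = 4, alignment = 4096
 * and (x, y) = (100, 10) give offset = 51600 and offset_aligned =
 * 49152; the 2448 byte remainder goes back into y = 2448 / 5120 = 0
 * and x = 2448 / 4 = 612.
 */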
2379
2380 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2381 const struct intel_plane_state *state,
2382 int color_plane)
2383 {
2384 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2385 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2386 const struct drm_framebuffer *fb = state->base.fb;
2387 unsigned int rotation = state->base.rotation;
2388 int pitch = state->color_plane[color_plane].stride;
2389 u32 alignment;
2390
2391 if (intel_plane->id == PLANE_CURSOR)
2392 alignment = intel_cursor_alignment(dev_priv);
2393 else
2394 alignment = intel_surf_alignment(fb, color_plane);
2395
2396 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2397 pitch, rotation, alignment);
2398 }
2399
2400 /* Convert the fb->offset[] into x/y offsets */
2401 static int intel_fb_offset_to_xy(int *x, int *y,
2402 const struct drm_framebuffer *fb,
2403 int color_plane)
2404 {
2405 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2406 unsigned int height;
2407
2408 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2409 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2410 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2411 fb->offsets[color_plane], color_plane);
2412 return -EINVAL;
2413 }
2414
2415 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2416 height = ALIGN(height, intel_tile_height(fb, color_plane));
2417
2418 /* Catch potential overflows early */
2419 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2420 fb->offsets[color_plane])) {
2421 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2422 fb->offsets[color_plane], fb->pitches[color_plane],
2423 color_plane);
2424 return -ERANGE;
2425 }
2426
2427 *x = 0;
2428 *y = 0;
2429
2430 intel_adjust_aligned_offset(x, y,
2431 fb, color_plane, DRM_MODE_ROTATE_0,
2432 fb->pitches[color_plane],
2433 fb->offsets[color_plane], 0);
2434
2435 return 0;
2436 }
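
/*
 * Example: a linear fb with pitches[0] = 5120 and cpp = 4 whose
 * offsets[0] = 51600 decodes to y = 51600 / 5120 = 10 and
 * x = (51600 - 10 * 5120) / 4 = 100.
 */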
2437
2438 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2439 {
2440 switch (fb_modifier) {
2441 case I915_FORMAT_MOD_X_TILED:
2442 return I915_TILING_X;
2443 case I915_FORMAT_MOD_Y_TILED:
2444 case I915_FORMAT_MOD_Y_TILED_CCS:
2445 return I915_TILING_Y;
2446 default:
2447 return I915_TILING_NONE;
2448 }
2449 }
2450
2451 /*
2452 * From the Sky Lake PRM:
2453 * "The Color Control Surface (CCS) contains the compression status of
2454 * the cache-line pairs. The compression state of the cache-line pair
2455 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2456 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2457 * cache-line-pairs. CCS is always Y tiled."
2458 *
2459 * Since cache line pairs refer to horizontally adjacent cache lines,
2460 * each cache line in the CCS corresponds to an area of 32x16 cache
2461 * lines on the main surface. Since each pixel is 4 bytes, this gives
2462 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2463 * main surface.
2464 */
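/*
 * Worked out: one 64 byte CCS cache line thus covers 32 * 16 px = 512
 * pixels across and 16 rows down of the main surface, i.e. 1 CCS byte
 * per 8x16 pixel area, which is where the .hsub = 8, .vsub = 16 and
 * CCS .cpp = 1 values below come from.
 */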
2465 static const struct drm_format_info ccs_formats[] = {
2466 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2467 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2468 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2469 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2470 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2471 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2472 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2473 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2474 };
2475
2476 static const struct drm_format_info *
2477 lookup_format_info(const struct drm_format_info formats[],
2478 int num_formats, u32 format)
2479 {
2480 int i;
2481
2482 for (i = 0; i < num_formats; i++) {
2483 if (formats[i].format == format)
2484 return &formats[i];
2485 }
2486
2487 return NULL;
2488 }
2489
2490 static const struct drm_format_info *
2491 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2492 {
2493 switch (cmd->modifier[0]) {
2494 case I915_FORMAT_MOD_Y_TILED_CCS:
2495 case I915_FORMAT_MOD_Yf_TILED_CCS:
2496 return lookup_format_info(ccs_formats,
2497 ARRAY_SIZE(ccs_formats),
2498 cmd->pixel_format);
2499 default:
2500 return NULL;
2501 }
2502 }
2503
2504 bool is_ccs_modifier(u64 modifier)
2505 {
2506 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2507 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2508 }
2509
2510 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2511 u32 pixel_format, u64 modifier)
2512 {
2513 struct intel_crtc *crtc;
2514 struct intel_plane *plane;
2515
2516 /*
2517 * We assume the primary plane for pipe A has
2518 * the highest stride limits of them all.
2519 */
2520 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2521 plane = to_intel_plane(crtc->base.primary);
2522
2523 return plane->max_stride(plane, pixel_format, modifier,
2524 DRM_MODE_ROTATE_0);
2525 }
2526
2527 static
2528 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2529 u32 pixel_format, u64 modifier)
2530 {
2531 /*
2532 * Arbitrary limit for gen4+ chosen to match the
2533 * render engine max stride.
2534 *
2535 * The new CCS hash mode makes remapping impossible.
2536 */
2537 if (!is_ccs_modifier(modifier)) {
2538 if (INTEL_GEN(dev_priv) >= 7)
2539 return 256*1024;
2540 else if (INTEL_GEN(dev_priv) >= 4)
2541 return 128*1024;
2542 }
2543
2544 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2545 }
2546
2547 static u32
2548 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2549 {
2550 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2551
2552 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2553 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2554 fb->format->format,
2555 fb->modifier);
2556
2557 /*
2558 * To make remapping with linear generally feasible
2559 * we need the stride to be page aligned.
2560 */
2561 if (fb->pitches[color_plane] > max_stride)
2562 return intel_tile_size(dev_priv);
2563 else
2564 return 64;
2565 } else {
2566 return intel_tile_width_bytes(fb, color_plane);
2567 }
2568 }
2569
2570 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2571 {
2572 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2573 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2574 const struct drm_framebuffer *fb = plane_state->base.fb;
2575 int i;
2576
2577 /* We don't want to deal with remapping with cursors */
2578 if (plane->id == PLANE_CURSOR)
2579 return false;
2580
2581 /*
2582 * The display engine limits already match/exceed the
2583 * render engine limits, so not much point in remapping.
2584 * Would also need to deal with the fence POT alignment
2585 * and gen2 2KiB GTT tile size.
2586 */
2587 if (INTEL_GEN(dev_priv) < 4)
2588 return false;
2589
2590 /*
2591 * The new CCS hash mode isn't compatible with remapping as
2592 * the virtual address of the pages affects the compressed data.
2593 */
2594 if (is_ccs_modifier(fb->modifier))
2595 return false;
2596
2597 /* Linear needs a page aligned stride for remapping */
2598 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2599 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2600
2601 for (i = 0; i < fb->format->num_planes; i++) {
2602 if (fb->pitches[i] & alignment)
2603 return false;
2604 }
2605 }
2606
2607 return true;
2608 }
2609
2610 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2611 {
2612 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2613 const struct drm_framebuffer *fb = plane_state->base.fb;
2614 unsigned int rotation = plane_state->base.rotation;
2615 u32 stride, max_stride;
2616
2617 /*
2618 * No remapping for invisible planes since we don't have
2619 * an actual source viewport to remap.
2620 */
2621 if (!plane_state->base.visible)
2622 return false;
2623
2624 if (!intel_plane_can_remap(plane_state))
2625 return false;
2626
2627 /*
2628 * FIXME: aux plane limits on gen9+ are
2629 * unclear in Bspec, for now no checking.
2630 */
2631 stride = intel_fb_pitch(fb, 0, rotation);
2632 max_stride = plane->max_stride(plane, fb->format->format,
2633 fb->modifier, rotation);
2634
2635 return stride > max_stride;
2636 }
2637
2638 static int
2639 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2640 struct drm_framebuffer *fb)
2641 {
2642 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2643 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2644 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2645 u32 gtt_offset_rotated = 0;
2646 unsigned int max_size = 0;
2647 int i, num_planes = fb->format->num_planes;
2648 unsigned int tile_size = intel_tile_size(dev_priv);
2649
2650 for (i = 0; i < num_planes; i++) {
2651 unsigned int width, height;
2652 unsigned int cpp, size;
2653 u32 offset;
2654 int x, y;
2655 int ret;
2656
2657 cpp = fb->format->cpp[i];
2658 width = drm_framebuffer_plane_width(fb->width, fb, i);
2659 height = drm_framebuffer_plane_height(fb->height, fb, i);
2660
2661 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2662 if (ret) {
2663 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2664 i, fb->offsets[i]);
2665 return ret;
2666 }
2667
2668 if (is_ccs_modifier(fb->modifier) && i == 1) {
2669 int hsub = fb->format->hsub;
2670 int vsub = fb->format->vsub;
2671 int tile_width, tile_height;
2672 int main_x, main_y;
2673 int ccs_x, ccs_y;
2674
2675 intel_tile_dims(fb, i, &tile_width, &tile_height);
2676 tile_width *= hsub;
2677 tile_height *= vsub;
2678
2679 ccs_x = (x * hsub) % tile_width;
2680 ccs_y = (y * vsub) % tile_height;
2681 main_x = intel_fb->normal[0].x % tile_width;
2682 main_y = intel_fb->normal[0].y % tile_height;
2683
2684 /*
2685 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2686 * x/y offsets must match between CCS and the main surface.
2687 */
2688 if (main_x != ccs_x || main_y != ccs_y) {
2689 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2690 main_x, main_y,
2691 ccs_x, ccs_y,
2692 intel_fb->normal[0].x,
2693 intel_fb->normal[0].y,
2694 x, y);
2695 return -EINVAL;
2696 }
2697 }
2698
2699 /*
2700 * The fence (if used) is aligned to the start of the object
2701 * so having the framebuffer wrap around across the edge of the
2702 * fenced region doesn't really work. We have no API to configure
2703 * the fence start offset within the object (nor could we probably
2704 * on gen2/3). So it's easier if we just require that the
2705 * fb layout agrees with the fence layout. We already check that the
2706 * fb stride matches the fence stride elsewhere.
2707 */
2708 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2709 (x + width) * cpp > fb->pitches[i]) {
2710 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2711 i, fb->offsets[i]);
2712 return -EINVAL;
2713 }
2714
2715 /*
2716 * First pixel of the framebuffer from
2717 * the start of the normal gtt mapping.
2718 */
2719 intel_fb->normal[i].x = x;
2720 intel_fb->normal[i].y = y;
2721
2722 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2723 fb->pitches[i],
2724 DRM_MODE_ROTATE_0,
2725 tile_size);
2726 offset /= tile_size;
2727
2728 if (!is_surface_linear(fb->modifier, i)) {
2729 unsigned int tile_width, tile_height;
2730 unsigned int pitch_tiles;
2731 struct drm_rect r;
2732
2733 intel_tile_dims(fb, i, &tile_width, &tile_height);
2734
2735 rot_info->plane[i].offset = offset;
2736 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2737 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2738 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2739
2740 intel_fb->rotated[i].pitch =
2741 rot_info->plane[i].height * tile_height;
2742
2743 /* how many tiles does this plane need */
2744 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2745 /*
2746 * If the plane isn't horizontally tile aligned,
2747 * we need one more tile.
2748 */
2749 if (x != 0)
2750 size++;
2751
2752 /* rotate the x/y offsets to match the GTT view */
2753 r.x1 = x;
2754 r.y1 = y;
2755 r.x2 = x + width;
2756 r.y2 = y + height;
2757 drm_rect_rotate(&r,
2758 rot_info->plane[i].width * tile_width,
2759 rot_info->plane[i].height * tile_height,
2760 DRM_MODE_ROTATE_270);
2761 x = r.x1;
2762 y = r.y1;
2763
2764 /* rotate the tile dimensions to match the GTT view */
2765 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2766 swap(tile_width, tile_height);
2767
2768 /*
2769 * We only keep the x/y offsets, so push all of the
2770 * gtt offset into the x/y offsets.
2771 */
2772 intel_adjust_tile_offset(&x, &y,
2773 tile_width, tile_height,
2774 tile_size, pitch_tiles,
2775 gtt_offset_rotated * tile_size, 0);
2776
2777 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2778
2779 /*
2780 * First pixel of the framebuffer from
2781 * the start of the rotated gtt mapping.
2782 */
2783 intel_fb->rotated[i].x = x;
2784 intel_fb->rotated[i].y = y;
2785 } else {
2786 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2787 x * cpp, tile_size);
2788 }
2789
2790 /* how many tiles in total needed in the bo */
2791 max_size = max(max_size, offset + size);
2792 }
2793
2794 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2795 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2796 mul_u32_u32(max_size, tile_size), obj->base.size);
2797 return -EINVAL;
2798 }
2799
2800 return 0;
2801 }
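
/*
 * Example for the rotated path above, assuming 128 byte Y-tiling: a
 * 1920x1080 XRGB8888 Y-tiled fb has 128 byte x 32 row tiles, i.e.
 * 32x32 pixels at 4 bpp. With x = y = 0 the plane spans
 * 1920 / 32 = 60 x DIV_ROUND_UP(1080, 32) = 34 tiles, and the rotated
 * pitch becomes 34 * 32 = 1088 pixels.
 */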
2802
2803 static void
2804 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2805 {
2806 struct drm_i915_private *dev_priv =
2807 to_i915(plane_state->base.plane->dev);
2808 struct drm_framebuffer *fb = plane_state->base.fb;
2809 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2810 struct intel_rotation_info *info = &plane_state->view.rotated;
2811 unsigned int rotation = plane_state->base.rotation;
2812 int i, num_planes = fb->format->num_planes;
2813 unsigned int tile_size = intel_tile_size(dev_priv);
2814 unsigned int src_x, src_y;
2815 unsigned int src_w, src_h;
2816 u32 gtt_offset = 0;
2817
2818 memset(&plane_state->view, 0, sizeof(plane_state->view));
2819 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2820 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2821
2822 src_x = plane_state->base.src.x1 >> 16;
2823 src_y = plane_state->base.src.y1 >> 16;
2824 src_w = drm_rect_width(&plane_state->base.src) >> 16;
2825 src_h = drm_rect_height(&plane_state->base.src) >> 16;
2826
2827 WARN_ON(is_ccs_modifier(fb->modifier));
2828
2829 /* Make src coordinates relative to the viewport */
2830 drm_rect_translate(&plane_state->base.src,
2831 -(src_x << 16), -(src_y << 16));
2832
2833 /* Rotate src coordinates to match rotated GTT view */
2834 if (drm_rotation_90_or_270(rotation))
2835 drm_rect_rotate(&plane_state->base.src,
2836 src_w << 16, src_h << 16,
2837 DRM_MODE_ROTATE_270);
2838
2839 for (i = 0; i < num_planes; i++) {
2840 unsigned int hsub = i ? fb->format->hsub : 1;
2841 unsigned int vsub = i ? fb->format->vsub : 1;
2842 unsigned int cpp = fb->format->cpp[i];
2843 unsigned int tile_width, tile_height;
2844 unsigned int width, height;
2845 unsigned int pitch_tiles;
2846 unsigned int x, y;
2847 u32 offset;
2848
2849 intel_tile_dims(fb, i, &tile_width, &tile_height);
2850
2851 x = src_x / hsub;
2852 y = src_y / vsub;
2853 width = src_w / hsub;
2854 height = src_h / vsub;
2855
2856 /*
2857 * First pixel of the src viewport from the
2858 * start of the normal gtt mapping.
2859 */
2860 x += intel_fb->normal[i].x;
2861 y += intel_fb->normal[i].y;
2862
2863 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2864 fb, i, fb->pitches[i],
2865 DRM_MODE_ROTATE_0, tile_size);
2866 offset /= tile_size;
2867
2868 info->plane[i].offset = offset;
2869 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2870 tile_width * cpp);
2871 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2872 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2873
2874 if (drm_rotation_90_or_270(rotation)) {
2875 struct drm_rect r;
2876
2877 /* rotate the x/y offsets to match the GTT view */
2878 r.x1 = x;
2879 r.y1 = y;
2880 r.x2 = x + width;
2881 r.y2 = y + height;
2882 drm_rect_rotate(&r,
2883 info->plane[i].width * tile_width,
2884 info->plane[i].height * tile_height,
2885 DRM_MODE_ROTATE_270);
2886 x = r.x1;
2887 y = r.y1;
2888
2889 pitch_tiles = info->plane[i].height;
2890 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2891
2892 /* rotate the tile dimensions to match the GTT view */
2893 swap(tile_width, tile_height);
2894 } else {
2895 pitch_tiles = info->plane[i].width;
2896 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2897 }
2898
2899 /*
2900 * We only keep the x/y offsets, so push all of the
2901 * gtt offset into the x/y offsets.
2902 */
2903 intel_adjust_tile_offset(&x, &y,
2904 tile_width, tile_height,
2905 tile_size, pitch_tiles,
2906 gtt_offset * tile_size, 0);
2907
2908 gtt_offset += info->plane[i].width * info->plane[i].height;
2909
2910 plane_state->color_plane[i].offset = 0;
2911 plane_state->color_plane[i].x = x;
2912 plane_state->color_plane[i].y = y;
2913 }
2914 }
2915
2916 static int
2917 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2918 {
2919 const struct intel_framebuffer *fb =
2920 to_intel_framebuffer(plane_state->base.fb);
2921 unsigned int rotation = plane_state->base.rotation;
2922 int i, num_planes;
2923
2924 if (!fb)
2925 return 0;
2926
2927 num_planes = fb->base.format->num_planes;
2928
2929 if (intel_plane_needs_remap(plane_state)) {
2930 intel_plane_remap_gtt(plane_state);
2931
2932 /*
2933 * Sometimes even remapping can't overcome
2934 * the stride limitations :( This can happen with
2935 * big plane sizes and suitably misaligned
2936 * offsets.
2937 */
2938 return intel_plane_check_stride(plane_state);
2939 }
2940
2941 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2942
2943 for (i = 0; i < num_planes; i++) {
2944 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2945 plane_state->color_plane[i].offset = 0;
2946
2947 if (drm_rotation_90_or_270(rotation)) {
2948 plane_state->color_plane[i].x = fb->rotated[i].x;
2949 plane_state->color_plane[i].y = fb->rotated[i].y;
2950 } else {
2951 plane_state->color_plane[i].x = fb->normal[i].x;
2952 plane_state->color_plane[i].y = fb->normal[i].y;
2953 }
2954 }
2955
2956 /* Rotate src coordinates to match rotated GTT view */
2957 if (drm_rotation_90_or_270(rotation))
2958 drm_rect_rotate(&plane_state->base.src,
2959 fb->base.width << 16, fb->base.height << 16,
2960 DRM_MODE_ROTATE_270);
2961
2962 return intel_plane_check_stride(plane_state);
2963 }
2964
2965 static int i9xx_format_to_fourcc(int format)
2966 {
2967 switch (format) {
2968 case DISPPLANE_8BPP:
2969 return DRM_FORMAT_C8;
2970 case DISPPLANE_BGRX555:
2971 return DRM_FORMAT_XRGB1555;
2972 case DISPPLANE_BGRX565:
2973 return DRM_FORMAT_RGB565;
2974 default:
2975 case DISPPLANE_BGRX888:
2976 return DRM_FORMAT_XRGB8888;
2977 case DISPPLANE_RGBX888:
2978 return DRM_FORMAT_XBGR8888;
2979 case DISPPLANE_BGRX101010:
2980 return DRM_FORMAT_XRGB2101010;
2981 case DISPPLANE_RGBX101010:
2982 return DRM_FORMAT_XBGR2101010;
2983 }
2984 }
2985
2986 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2987 {
2988 switch (format) {
2989 case PLANE_CTL_FORMAT_RGB_565:
2990 return DRM_FORMAT_RGB565;
2991 case PLANE_CTL_FORMAT_NV12:
2992 return DRM_FORMAT_NV12;
2993 case PLANE_CTL_FORMAT_P010:
2994 return DRM_FORMAT_P010;
2995 case PLANE_CTL_FORMAT_P012:
2996 return DRM_FORMAT_P012;
2997 case PLANE_CTL_FORMAT_P016:
2998 return DRM_FORMAT_P016;
2999 case PLANE_CTL_FORMAT_Y210:
3000 return DRM_FORMAT_Y210;
3001 case PLANE_CTL_FORMAT_Y212:
3002 return DRM_FORMAT_Y212;
3003 case PLANE_CTL_FORMAT_Y216:
3004 return DRM_FORMAT_Y216;
3005 case PLANE_CTL_FORMAT_Y410:
3006 return DRM_FORMAT_XVYU2101010;
3007 case PLANE_CTL_FORMAT_Y412:
3008 return DRM_FORMAT_XVYU12_16161616;
3009 case PLANE_CTL_FORMAT_Y416:
3010 return DRM_FORMAT_XVYU16161616;
3011 default:
3012 case PLANE_CTL_FORMAT_XRGB_8888:
3013 if (rgb_order) {
3014 if (alpha)
3015 return DRM_FORMAT_ABGR8888;
3016 else
3017 return DRM_FORMAT_XBGR8888;
3018 } else {
3019 if (alpha)
3020 return DRM_FORMAT_ARGB8888;
3021 else
3022 return DRM_FORMAT_XRGB8888;
3023 }
3024 case PLANE_CTL_FORMAT_XRGB_2101010:
3025 if (rgb_order)
3026 return DRM_FORMAT_XBGR2101010;
3027 else
3028 return DRM_FORMAT_XRGB2101010;
3029 case PLANE_CTL_FORMAT_XRGB_16161616F:
3030 if (rgb_order) {
3031 if (alpha)
3032 return DRM_FORMAT_ABGR16161616F;
3033 else
3034 return DRM_FORMAT_XBGR16161616F;
3035 } else {
3036 if (alpha)
3037 return DRM_FORMAT_ARGB16161616F;
3038 else
3039 return DRM_FORMAT_XRGB16161616F;
3040 }
3041 }
3042 }
3043
3044 static bool
3045 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3046 struct intel_initial_plane_config *plane_config)
3047 {
3048 struct drm_device *dev = crtc->base.dev;
3049 struct drm_i915_private *dev_priv = to_i915(dev);
3050 struct drm_i915_gem_object *obj = NULL;
3051 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3052 struct drm_framebuffer *fb = &plane_config->fb->base;
3053 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3054 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3055 PAGE_SIZE);
3056
3057 size_aligned -= base_aligned;
3058
3059 if (plane_config->size == 0)
3060 return false;
3061
3062 /* If the FB is too big, just don't use it since fbdev is not very
3063 * important and we should probably use that space with FBC or other
3064 * features. */
3065 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3066 return false;
3067
3068 switch (fb->modifier) {
3069 case DRM_FORMAT_MOD_LINEAR:
3070 case I915_FORMAT_MOD_X_TILED:
3071 case I915_FORMAT_MOD_Y_TILED:
3072 break;
3073 default:
3074 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3075 fb->modifier);
3076 return false;
3077 }
3078
3079 mutex_lock(&dev->struct_mutex);
3080 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3081 base_aligned,
3082 base_aligned,
3083 size_aligned);
3084 mutex_unlock(&dev->struct_mutex);
3085 if (!obj)
3086 return false;
3087
3088 switch (plane_config->tiling) {
3089 case I915_TILING_NONE:
3090 break;
3091 case I915_TILING_X:
3092 case I915_TILING_Y:
3093 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3094 break;
3095 default:
3096 MISSING_CASE(plane_config->tiling);
3097 return false;
3098 }
3099
3100 mode_cmd.pixel_format = fb->format->format;
3101 mode_cmd.width = fb->width;
3102 mode_cmd.height = fb->height;
3103 mode_cmd.pitches[0] = fb->pitches[0];
3104 mode_cmd.modifier[0] = fb->modifier;
3105 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3106
3107 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3108 DRM_DEBUG_KMS("intel fb init failed\n");
3109 goto out_unref_obj;
3110 }
3111
3113 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3114 return true;
3115
3116 out_unref_obj:
3117 i915_gem_object_put(obj);
3118 return false;
3119 }
3120
3121 static void
3122 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3123 struct intel_plane_state *plane_state,
3124 bool visible)
3125 {
3126 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3127
3128 plane_state->base.visible = visible;
3129
3130 if (visible)
3131 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3132 else
3133 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3134 }
3135
3136 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3137 {
3138 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3139 struct drm_plane *plane;
3140
3141 /*
3142 * Bits in active_planes alias if multiple "primary" or cursor planes
3143 * have been used on the same (or wrong) pipe. plane_mask uses
3144 * unique ids, hence we can use that to reconstruct active_planes.
3145 */
3146 crtc_state->active_planes = 0;
3147
3148 drm_for_each_plane_mask(plane, &dev_priv->drm,
3149 crtc_state->base.plane_mask)
3150 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3151 }
3152
3153 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3154 struct intel_plane *plane)
3155 {
3156 struct intel_crtc_state *crtc_state =
3157 to_intel_crtc_state(crtc->base.state);
3158 struct intel_plane_state *plane_state =
3159 to_intel_plane_state(plane->base.state);
3160
3161 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3162 plane->base.base.id, plane->base.name,
3163 crtc->base.base.id, crtc->base.name);
3164
3165 intel_set_plane_visible(crtc_state, plane_state, false);
3166 fixup_active_planes(crtc_state);
3167 crtc_state->data_rate[plane->id] = 0;
3168
3169 if (plane->id == PLANE_PRIMARY)
3170 intel_pre_disable_primary_noatomic(&crtc->base);
3171
3172 intel_disable_plane(plane, crtc_state);
3173 }
3174
3175 static void
3176 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3177 struct intel_initial_plane_config *plane_config)
3178 {
3179 struct drm_device *dev = intel_crtc->base.dev;
3180 struct drm_i915_private *dev_priv = to_i915(dev);
3181 struct drm_crtc *c;
3182 struct drm_i915_gem_object *obj;
3183 struct drm_plane *primary = intel_crtc->base.primary;
3184 struct drm_plane_state *plane_state = primary->state;
3185 struct intel_plane *intel_plane = to_intel_plane(primary);
3186 struct intel_plane_state *intel_state =
3187 to_intel_plane_state(plane_state);
3188 struct drm_framebuffer *fb;
3189
3190 if (!plane_config->fb)
3191 return;
3192
3193 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3194 fb = &plane_config->fb->base;
3195 goto valid_fb;
3196 }
3197
3198 kfree(plane_config->fb);
3199
3200 /*
3201 * Failed to alloc the obj, check to see if we should share
3202 * an fb with another CRTC instead
3203 */
3204 for_each_crtc(dev, c) {
3205 struct intel_plane_state *state;
3206
3207 if (c == &intel_crtc->base)
3208 continue;
3209
3210 if (!to_intel_crtc(c)->active)
3211 continue;
3212
3213 state = to_intel_plane_state(c->primary->state);
3214 if (!state->vma)
3215 continue;
3216
3217 if (intel_plane_ggtt_offset(state) == plane_config->base) {
3218 fb = state->base.fb;
3219 drm_framebuffer_get(fb);
3220 goto valid_fb;
3221 }
3222 }
3223
3224 /*
3225 * We've failed to reconstruct the BIOS FB. Current display state
3226 * indicates that the primary plane is visible, but has a NULL FB,
3227 * which will lead to problems later if we don't fix it up. The
3228 * simplest solution is to just disable the primary plane now and
3229 * pretend the BIOS never had it enabled.
3230 */
3231 intel_plane_disable_noatomic(intel_crtc, intel_plane);
3232
3233 return;
3234
3235 valid_fb:
3236 intel_state->base.rotation = plane_config->rotation;
3237 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3238 intel_state->base.rotation);
3239 intel_state->color_plane[0].stride =
3240 intel_fb_pitch(fb, 0, intel_state->base.rotation);
3241
3242 mutex_lock(&dev->struct_mutex);
3243 intel_state->vma =
3244 intel_pin_and_fence_fb_obj(fb,
3245 &intel_state->view,
3246 intel_plane_uses_fence(intel_state),
3247 &intel_state->flags);
3248 mutex_unlock(&dev->struct_mutex);
3249 if (IS_ERR(intel_state->vma)) {
3250 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3251 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3252
3253 intel_state->vma = NULL;
3254 drm_framebuffer_put(fb);
3255 return;
3256 }
3257
3258 obj = intel_fb_obj(fb);
3259 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
3260
3261 plane_state->src_x = 0;
3262 plane_state->src_y = 0;
3263 plane_state->src_w = fb->width << 16;
3264 plane_state->src_h = fb->height << 16;
3265
3266 plane_state->crtc_x = 0;
3267 plane_state->crtc_y = 0;
3268 plane_state->crtc_w = fb->width;
3269 plane_state->crtc_h = fb->height;
3270
3271 intel_state->base.src = drm_plane_state_src(plane_state);
3272 intel_state->base.dst = drm_plane_state_dest(plane_state);
3273
3274 if (i915_gem_object_is_tiled(obj))
3275 dev_priv->preserve_bios_swizzle = true;
3276
3277 plane_state->fb = fb;
3278 plane_state->crtc = &intel_crtc->base;
3279
3280 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3281 &obj->frontbuffer_bits);
3282 }
3283
3284 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3285 int color_plane,
3286 unsigned int rotation)
3287 {
3288 int cpp = fb->format->cpp[color_plane];
3289
3290 switch (fb->modifier) {
3291 case DRM_FORMAT_MOD_LINEAR:
3292 case I915_FORMAT_MOD_X_TILED:
3293 return 4096;
3294 case I915_FORMAT_MOD_Y_TILED_CCS:
3295 case I915_FORMAT_MOD_Yf_TILED_CCS:
3296 /* FIXME AUX plane? */
3297 case I915_FORMAT_MOD_Y_TILED:
3298 case I915_FORMAT_MOD_Yf_TILED:
3299 if (cpp == 8)
3300 return 2048;
3301 else
3302 return 4096;
3303 default:
3304 MISSING_CASE(fb->modifier);
3305 return 2048;
3306 }
3307 }
3308
3309 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3310 int color_plane,
3311 unsigned int rotation)
3312 {
3313 int cpp = fb->format->cpp[color_plane];
3314
3315 switch (fb->modifier) {
3316 case DRM_FORMAT_MOD_LINEAR:
3317 case I915_FORMAT_MOD_X_TILED:
3318 if (cpp == 8)
3319 return 4096;
3320 else
3321 return 5120;
3322 case I915_FORMAT_MOD_Y_TILED_CCS:
3323 case I915_FORMAT_MOD_Yf_TILED_CCS:
3324 /* FIXME AUX plane? */
3325 case I915_FORMAT_MOD_Y_TILED:
3326 case I915_FORMAT_MOD_Yf_TILED:
3327 if (cpp == 8)
3328 return 2048;
3329 else
3330 return 5120;
3331 default:
3332 MISSING_CASE(fb->modifier);
3333 return 2048;
3334 }
3335 }
3336
3337 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3338 int color_plane,
3339 unsigned int rotation)
3340 {
3341 return 5120;
3342 }
3343
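/*
 * The CCS AUX surface has no x/y offset registers of its own, so walk
 * its offset down one alignment step at a time, recomputing the
 * intra-tile x/y, until they match the main surface's.
 */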
3344 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3345 int main_x, int main_y, u32 main_offset)
3346 {
3347 const struct drm_framebuffer *fb = plane_state->base.fb;
3348 int hsub = fb->format->hsub;
3349 int vsub = fb->format->vsub;
3350 int aux_x = plane_state->color_plane[1].x;
3351 int aux_y = plane_state->color_plane[1].y;
3352 u32 aux_offset = plane_state->color_plane[1].offset;
3353 u32 alignment = intel_surf_alignment(fb, 1);
3354
3355 while (aux_offset >= main_offset && aux_y <= main_y) {
3356 int x, y;
3357
3358 if (aux_x == main_x && aux_y == main_y)
3359 break;
3360
3361 if (aux_offset == 0)
3362 break;
3363
3364 x = aux_x / hsub;
3365 y = aux_y / vsub;
3366 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3367 aux_offset, aux_offset - alignment);
3368 aux_x = x * hsub + aux_x % hsub;
3369 aux_y = y * vsub + aux_y % vsub;
3370 }
3371
3372 if (aux_x != main_x || aux_y != main_y)
3373 return false;
3374
3375 plane_state->color_plane[1].offset = aux_offset;
3376 plane_state->color_plane[1].x = aux_x;
3377 plane_state->color_plane[1].y = aux_y;
3378
3379 return true;
3380 }
3381
3382 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3383 {
3384 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
3385 const struct drm_framebuffer *fb = plane_state->base.fb;
3386 unsigned int rotation = plane_state->base.rotation;
3387 int x = plane_state->base.src.x1 >> 16;
3388 int y = plane_state->base.src.y1 >> 16;
3389 int w = drm_rect_width(&plane_state->base.src) >> 16;
3390 int h = drm_rect_height(&plane_state->base.src) >> 16;
3391 int max_width;
3392 int max_height = 4096;
3393 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3394
3395 if (INTEL_GEN(dev_priv) >= 11)
3396 max_width = icl_max_plane_width(fb, 0, rotation);
3397 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3398 max_width = glk_max_plane_width(fb, 0, rotation);
3399 else
3400 max_width = skl_max_plane_width(fb, 0, rotation);
3401
3402 if (w > max_width || h > max_height) {
3403 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3404 w, h, max_width, max_height);
3405 return -EINVAL;
3406 }
3407
3408 intel_add_fb_offsets(&x, &y, plane_state, 0);
3409 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3410 alignment = intel_surf_alignment(fb, 0);
3411
3412 /*
3413 * AUX surface offset is specified as the distance from the
3414 * main surface offset, and it must be non-negative. Make
3415 * sure that is what we will get.
3416 */
3417 if (offset > aux_offset)
3418 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3419 offset, aux_offset & ~(alignment - 1));
3420
3421 /*
3422 * When using an X-tiled surface, the plane blows up
3423 * if the x offset + width exceeds the stride.
3424 *
3425 * TODO: linear and Y-tiled seem fine, Yf untested.
3426 */
3427 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3428 int cpp = fb->format->cpp[0];
3429
3430 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3431 if (offset == 0) {
3432 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3433 return -EINVAL;
3434 }
3435
3436 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3437 offset, offset - alignment);
3438 }
3439 }
3440
3441 /*
3442 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3443 * they match with the main surface x/y offsets.
3444 */
3445 if (is_ccs_modifier(fb->modifier)) {
3446 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3447 if (offset == 0)
3448 break;
3449
3450 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3451 offset, offset - alignment);
3452 }
3453
3454 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3455 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3456 return -EINVAL;
3457 }
3458 }
3459
3460 plane_state->color_plane[0].offset = offset;
3461 plane_state->color_plane[0].x = x;
3462 plane_state->color_plane[0].y = y;
3463
3464 /*
3465 * Put the final coordinates back so that the src
3466 * coordinate checks will see the right values.
3467 */
3468 drm_rect_translate(&plane_state->base.src,
3469 (x << 16) - plane_state->base.src.x1,
3470 (y << 16) - plane_state->base.src.y1);
3471
3472 return 0;
3473 }
3474
3475 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3476 {
3477 const struct drm_framebuffer *fb = plane_state->base.fb;
3478 unsigned int rotation = plane_state->base.rotation;
3479 int max_width = skl_max_plane_width(fb, 1, rotation);
3480 int max_height = 4096;
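/*
 * src coords are 16.16 fixed point; the extra shift also halves
 * them for the 4:2:0 chroma plane.
 */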
3481 int x = plane_state->base.src.x1 >> 17;
3482 int y = plane_state->base.src.y1 >> 17;
3483 int w = drm_rect_width(&plane_state->base.src) >> 17;
3484 int h = drm_rect_height(&plane_state->base.src) >> 17;
3485 u32 offset;
3486
3487 intel_add_fb_offsets(&x, &y, plane_state, 1);
3488 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3489
3490 /* FIXME not quite sure how/if these apply to the chroma plane */
3491 if (w > max_width || h > max_height) {
3492 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3493 w, h, max_width, max_height);
3494 return -EINVAL;
3495 }
3496
3497 plane_state->color_plane[1].offset = offset;
3498 plane_state->color_plane[1].x = x;
3499 plane_state->color_plane[1].y = y;
3500
3501 return 0;
3502 }
3503
3504 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3505 {
3506 const struct drm_framebuffer *fb = plane_state->base.fb;
3507 int src_x = plane_state->base.src.x1 >> 16;
3508 int src_y = plane_state->base.src.y1 >> 16;
3509 int hsub = fb->format->hsub;
3510 int vsub = fb->format->vsub;
3511 int x = src_x / hsub;
3512 int y = src_y / vsub;
3513 u32 offset;
3514
3515 intel_add_fb_offsets(&x, &y, plane_state, 1);
3516 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3517
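/* back to full resolution units, preserving the sub-sample remainder */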
3518 plane_state->color_plane[1].offset = offset;
3519 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3520 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3521
3522 return 0;
3523 }
3524
3525 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3526 {
3527 const struct drm_framebuffer *fb = plane_state->base.fb;
3528 int ret;
3529
3530 ret = intel_plane_compute_gtt(plane_state);
3531 if (ret)
3532 return ret;
3533
3534 if (!plane_state->base.visible)
3535 return 0;
3536
3537 /*
3538 * Handle the AUX surface first since
3539 * the main surface setup depends on it.
3540 */
3541 if (is_planar_yuv_format(fb->format->format)) {
3542 ret = skl_check_nv12_aux_surface(plane_state);
3543 if (ret)
3544 return ret;
3545 } else if (is_ccs_modifier(fb->modifier)) {
3546 ret = skl_check_ccs_aux_surface(plane_state);
3547 if (ret)
3548 return ret;
3549 } else {
3550 plane_state->color_plane[1].offset = ~0xfff;
3551 plane_state->color_plane[1].x = 0;
3552 plane_state->color_plane[1].y = 0;
3553 }
3554
3555 ret = skl_check_main_surface(plane_state);
3556 if (ret)
3557 return ret;
3558
3559 return 0;
3560 }
3561
3562 unsigned int
3563 i9xx_plane_max_stride(struct intel_plane *plane,
3564 u32 pixel_format, u64 modifier,
3565 unsigned int rotation)
3566 {
3567 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3568
3569 if (!HAS_GMCH(dev_priv)) {
3570 return 32*1024;
3571 } else if (INTEL_GEN(dev_priv) >= 4) {
3572 if (modifier == I915_FORMAT_MOD_X_TILED)
3573 return 16*1024;
3574 else
3575 return 32*1024;
3576 } else if (INTEL_GEN(dev_priv) >= 3) {
3577 if (modifier == I915_FORMAT_MOD_X_TILED)
3578 return 8*1024;
3579 else
3580 return 16*1024;
3581 } else {
3582 if (plane->i9xx_plane == PLANE_C)
3583 return 4*1024;
3584 else
3585 return 8*1024;
3586 }
3587 }
3588
3589 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3590 {
3591 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3592 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3593 u32 dspcntr = 0;
3594
3595 if (crtc_state->gamma_enable)
3596 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3597
3598 if (crtc_state->csc_enable)
3599 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3600
3601 if (INTEL_GEN(dev_priv) < 5)
3602 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3603
3604 return dspcntr;
3605 }
3606
3607 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3608 const struct intel_plane_state *plane_state)
3609 {
3610 struct drm_i915_private *dev_priv =
3611 to_i915(plane_state->base.plane->dev);
3612 const struct drm_framebuffer *fb = plane_state->base.fb;
3613 unsigned int rotation = plane_state->base.rotation;
3614 u32 dspcntr;
3615
3616 dspcntr = DISPLAY_PLANE_ENABLE;
3617
3618 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3619 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3620 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3621
3622 switch (fb->format->format) {
3623 case DRM_FORMAT_C8:
3624 dspcntr |= DISPPLANE_8BPP;
3625 break;
3626 case DRM_FORMAT_XRGB1555:
3627 dspcntr |= DISPPLANE_BGRX555;
3628 break;
3629 case DRM_FORMAT_RGB565:
3630 dspcntr |= DISPPLANE_BGRX565;
3631 break;
3632 case DRM_FORMAT_XRGB8888:
3633 dspcntr |= DISPPLANE_BGRX888;
3634 break;
3635 case DRM_FORMAT_XBGR8888:
3636 dspcntr |= DISPPLANE_RGBX888;
3637 break;
3638 case DRM_FORMAT_XRGB2101010:
3639 dspcntr |= DISPPLANE_BGRX101010;
3640 break;
3641 case DRM_FORMAT_XBGR2101010:
3642 dspcntr |= DISPPLANE_RGBX101010;
3643 break;
3644 default:
3645 MISSING_CASE(fb->format->format);
3646 return 0;
3647 }
3648
3649 if (INTEL_GEN(dev_priv) >= 4 &&
3650 fb->modifier == I915_FORMAT_MOD_X_TILED)
3651 dspcntr |= DISPPLANE_TILED;
3652
3653 if (rotation & DRM_MODE_ROTATE_180)
3654 dspcntr |= DISPPLANE_ROTATE_180;
3655
3656 if (rotation & DRM_MODE_REFLECT_X)
3657 dspcntr |= DISPPLANE_MIRROR;
3658
3659 return dspcntr;
3660 }
3661
3662 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3663 {
3664 struct drm_i915_private *dev_priv =
3665 to_i915(plane_state->base.plane->dev);
3666 int src_x, src_y;
3667 u32 offset;
3668 int ret;
3669
3670 ret = intel_plane_compute_gtt(plane_state);
3671 if (ret)
3672 return ret;
3673
3674 if (!plane_state->base.visible)
3675 return 0;
3676
3677 src_x = plane_state->base.src.x1 >> 16;
3678 src_y = plane_state->base.src.y1 >> 16;
3679
3680 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3681
3682 if (INTEL_GEN(dev_priv) >= 4)
3683 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3684 plane_state, 0);
3685 else
3686 offset = 0;
3687
3688 /*
3689 * Put the final coordinates back so that the src
3690 * coordinate checks will see the right values.
3691 */
3692 drm_rect_translate(&plane_state->base.src,
3693 (src_x << 16) - plane_state->base.src.x1,
3694 (src_y << 16) - plane_state->base.src.y1);
3695
3696 /* HSW/BDW do this automagically in hardware */
3697 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3698 unsigned int rotation = plane_state->base.rotation;
3699 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3700 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3701
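/*
 * For 180 degree rotation (or X mirroring) scanout effectively starts
 * from the opposite edge, so point the offsets at the last pixel/line
 * of the source rectangle.
 */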
3702 if (rotation & DRM_MODE_ROTATE_180) {
3703 src_x += src_w - 1;
3704 src_y += src_h - 1;
3705 } else if (rotation & DRM_MODE_REFLECT_X) {
3706 src_x += src_w - 1;
3707 }
3708 }
3709
3710 plane_state->color_plane[0].offset = offset;
3711 plane_state->color_plane[0].x = src_x;
3712 plane_state->color_plane[0].y = src_y;
3713
3714 return 0;
3715 }
3716
3717 static int
3718 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3719 struct intel_plane_state *plane_state)
3720 {
3721 int ret;
3722
3723 ret = chv_plane_check_rotation(plane_state);
3724 if (ret)
3725 return ret;
3726
3727 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3728 &crtc_state->base,
3729 DRM_PLANE_HELPER_NO_SCALING,
3730 DRM_PLANE_HELPER_NO_SCALING,
3731 false, true);
3732 if (ret)
3733 return ret;
3734
3735 ret = i9xx_check_plane_surface(plane_state);
3736 if (ret)
3737 return ret;
3738
3739 if (!plane_state->base.visible)
3740 return 0;
3741
3742 ret = intel_plane_check_src_coordinates(plane_state);
3743 if (ret)
3744 return ret;
3745
3746 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3747
3748 return 0;
3749 }
3750
3751 static void i9xx_update_plane(struct intel_plane *plane,
3752 const struct intel_crtc_state *crtc_state,
3753 const struct intel_plane_state *plane_state)
3754 {
3755 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3756 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3757 u32 linear_offset;
3758 int x = plane_state->color_plane[0].x;
3759 int y = plane_state->color_plane[0].y;
3760 unsigned long irqflags;
3761 u32 dspaddr_offset;
3762 u32 dspcntr;
3763
3764 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3765
3766 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3767
3768 if (INTEL_GEN(dev_priv) >= 4)
3769 dspaddr_offset = plane_state->color_plane[0].offset;
3770 else
3771 dspaddr_offset = linear_offset;
3772
3773 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3774
3775 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3776
3777 if (INTEL_GEN(dev_priv) < 4) {
3778 /* pipesrc and dspsize control the size that is scaled from,
3779 * which should always be the user's requested size.
3780 */
3781 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3782 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3783 ((crtc_state->pipe_src_h - 1) << 16) |
3784 (crtc_state->pipe_src_w - 1));
3785 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3786 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3787 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3788 ((crtc_state->pipe_src_h - 1) << 16) |
3789 (crtc_state->pipe_src_w - 1));
3790 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3791 }
3792
3793 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3794 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3795 } else if (INTEL_GEN(dev_priv) >= 4) {
3796 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3797 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3798 }
3799
3800 /*
3801 * The control register self-arms if the plane was previously
3802 * disabled. Try to make the plane enable atomic by writing
3803 * the control register just before the surface register.
3804 */
3805 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3806 if (INTEL_GEN(dev_priv) >= 4)
3807 I915_WRITE_FW(DSPSURF(i9xx_plane),
3808 intel_plane_ggtt_offset(plane_state) +
3809 dspaddr_offset);
3810 else
3811 I915_WRITE_FW(DSPADDR(i9xx_plane),
3812 intel_plane_ggtt_offset(plane_state) +
3813 dspaddr_offset);
3814
3815 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3816 }
3817
3818 static void i9xx_disable_plane(struct intel_plane *plane,
3819 const struct intel_crtc_state *crtc_state)
3820 {
3821 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3822 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3823 unsigned long irqflags;
3824 u32 dspcntr;
3825
3826 /*
3827 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3828 * enable on ilk+ affect the pipe bottom color as
3829 * well, so we must configure them even if the plane
3830 * is disabled.
3831 *
3832 * On pre-g4x there is no way to gamma correct the
3833 * pipe bottom color but we'll keep on doing this
3834 * anyway so that the crtc state readout works correctly.
3835 */
3836 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3837
3838 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3839
3840 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3841 if (INTEL_GEN(dev_priv) >= 4)
3842 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3843 else
3844 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3845
3846 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3847 }
3848
3849 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3850 enum pipe *pipe)
3851 {
3852 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3853 enum intel_display_power_domain power_domain;
3854 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3855 intel_wakeref_t wakeref;
3856 bool ret;
3857 u32 val;
3858
3859 /*
3860 * Not 100% correct for planes that can move between pipes,
3861 * but that's only the case for gen2-4 which don't have any
3862 * display power wells.
3863 */
3864 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3865 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3866 if (!wakeref)
3867 return false;
3868
3869 val = I915_READ(DSPCNTR(i9xx_plane));
3870
3871 ret = val & DISPLAY_PLANE_ENABLE;
3872
3873 if (INTEL_GEN(dev_priv) >= 5)
3874 *pipe = plane->pipe;
3875 else
3876 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3877 DISPPLANE_SEL_PIPE_SHIFT;
3878
3879 intel_display_power_put(dev_priv, power_domain, wakeref);
3880
3881 return ret;
3882 }
3883
3884 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3885 {
3886 struct drm_device *dev = intel_crtc->base.dev;
3887 struct drm_i915_private *dev_priv = to_i915(dev);
3888
3889 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3890 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3891 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3892 }
3893
3894 /*
3895 * This function detaches (i.e. unbinds) unused scalers in hardware
3896 */
3897 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3898 {
3899 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3900 const struct intel_crtc_scaler_state *scaler_state =
3901 &crtc_state->scaler_state;
3902 int i;
3903
3904 /* loop through and disable scalers that aren't in use */
3905 for (i = 0; i < intel_crtc->num_scalers; i++) {
3906 if (!scaler_state->scalers[i].in_use)
3907 skl_detach_scaler(intel_crtc, i);
3908 }
3909 }
3910
3911 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3912 int color_plane, unsigned int rotation)
3913 {
3914 /*
3915 * The stride is either expressed in 64 byte chunks for
3916 * linear buffers or in number of tiles for tiled buffers.
3917 */
3918 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3919 return 64;
3920 else if (drm_rotation_90_or_270(rotation))
3921 return intel_tile_height(fb, color_plane);
3922 else
3923 return intel_tile_width_bytes(fb, color_plane);
3924 }
3925
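/*
 * Worked example (illustrative numbers): for an unrotated linear
 * framebuffer with a byte stride of 4096, the value programmed into
 * PLANE_STRIDE is 4096 / 64 = 64; for an X-tiled buffer (512 byte
 * wide tiles) with the same byte stride it is 4096 / 512 = 8 tiles.
 */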
3926 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3927 int color_plane)
3928 {
3929 const struct drm_framebuffer *fb = plane_state->base.fb;
3930 unsigned int rotation = plane_state->base.rotation;
3931 u32 stride = plane_state->color_plane[color_plane].stride;
3932
3933 if (color_plane >= fb->format->num_planes)
3934 return 0;
3935
3936 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3937 }
3938
3939 static u32 skl_plane_ctl_format(u32 pixel_format)
3940 {
3941 switch (pixel_format) {
3942 case DRM_FORMAT_C8:
3943 return PLANE_CTL_FORMAT_INDEXED;
3944 case DRM_FORMAT_RGB565:
3945 return PLANE_CTL_FORMAT_RGB_565;
3946 case DRM_FORMAT_XBGR8888:
3947 case DRM_FORMAT_ABGR8888:
3948 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3949 case DRM_FORMAT_XRGB8888:
3950 case DRM_FORMAT_ARGB8888:
3951 return PLANE_CTL_FORMAT_XRGB_8888;
3952 case DRM_FORMAT_XRGB2101010:
3953 return PLANE_CTL_FORMAT_XRGB_2101010;
3954 case DRM_FORMAT_XBGR2101010:
3955 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3956 case DRM_FORMAT_XBGR16161616F:
3957 case DRM_FORMAT_ABGR16161616F:
3958 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3959 case DRM_FORMAT_XRGB16161616F:
3960 case DRM_FORMAT_ARGB16161616F:
3961 return PLANE_CTL_FORMAT_XRGB_16161616F;
3962 case DRM_FORMAT_YUYV:
3963 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3964 case DRM_FORMAT_YVYU:
3965 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3966 case DRM_FORMAT_UYVY:
3967 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3968 case DRM_FORMAT_VYUY:
3969 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3970 case DRM_FORMAT_NV12:
3971 return PLANE_CTL_FORMAT_NV12;
3972 case DRM_FORMAT_P010:
3973 return PLANE_CTL_FORMAT_P010;
3974 case DRM_FORMAT_P012:
3975 return PLANE_CTL_FORMAT_P012;
3976 case DRM_FORMAT_P016:
3977 return PLANE_CTL_FORMAT_P016;
3978 case DRM_FORMAT_Y210:
3979 return PLANE_CTL_FORMAT_Y210;
3980 case DRM_FORMAT_Y212:
3981 return PLANE_CTL_FORMAT_Y212;
3982 case DRM_FORMAT_Y216:
3983 return PLANE_CTL_FORMAT_Y216;
3984 case DRM_FORMAT_XVYU2101010:
3985 return PLANE_CTL_FORMAT_Y410;
3986 case DRM_FORMAT_XVYU12_16161616:
3987 return PLANE_CTL_FORMAT_Y412;
3988 case DRM_FORMAT_XVYU16161616:
3989 return PLANE_CTL_FORMAT_Y416;
3990 default:
3991 MISSING_CASE(pixel_format);
3992 }
3993
3994 return 0;
3995 }
3996
3997 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3998 {
3999 if (!plane_state->base.fb->format->has_alpha)
4000 return PLANE_CTL_ALPHA_DISABLE;
4001
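/*
 * DRM_MODE_BLEND_PREMULTI means the framebuffer already contains
 * premultiplied alpha, so the hardware only blends; with
 * DRM_MODE_BLEND_COVERAGE the hardware must do the multiply itself.
 */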
4002 switch (plane_state->base.pixel_blend_mode) {
4003 case DRM_MODE_BLEND_PIXEL_NONE:
4004 return PLANE_CTL_ALPHA_DISABLE;
4005 case DRM_MODE_BLEND_PREMULTI:
4006 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4007 case DRM_MODE_BLEND_COVERAGE:
4008 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4009 default:
4010 MISSING_CASE(plane_state->base.pixel_blend_mode);
4011 return PLANE_CTL_ALPHA_DISABLE;
4012 }
4013 }
4014
4015 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4016 {
4017 if (!plane_state->base.fb->format->has_alpha)
4018 return PLANE_COLOR_ALPHA_DISABLE;
4019
4020 switch (plane_state->base.pixel_blend_mode) {
4021 case DRM_MODE_BLEND_PIXEL_NONE:
4022 return PLANE_COLOR_ALPHA_DISABLE;
4023 case DRM_MODE_BLEND_PREMULTI:
4024 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4025 case DRM_MODE_BLEND_COVERAGE:
4026 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4027 default:
4028 MISSING_CASE(plane_state->base.pixel_blend_mode);
4029 return PLANE_COLOR_ALPHA_DISABLE;
4030 }
4031 }
4032
4033 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4034 {
4035 switch (fb_modifier) {
4036 case DRM_FORMAT_MOD_LINEAR:
4037 break;
4038 case I915_FORMAT_MOD_X_TILED:
4039 return PLANE_CTL_TILED_X;
4040 case I915_FORMAT_MOD_Y_TILED:
4041 return PLANE_CTL_TILED_Y;
4042 case I915_FORMAT_MOD_Y_TILED_CCS:
4043 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4044 case I915_FORMAT_MOD_Yf_TILED:
4045 return PLANE_CTL_TILED_YF;
4046 case I915_FORMAT_MOD_Yf_TILED_CCS:
4047 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4048 default:
4049 MISSING_CASE(fb_modifier);
4050 }
4051
4052 return 0;
4053 }
4054
4055 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4056 {
4057 switch (rotate) {
4058 case DRM_MODE_ROTATE_0:
4059 break;
4060 /*
4061 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
4062 * while i915 HW rotation is clockwise; that's why the values are swapped here.
4063 */
4064 case DRM_MODE_ROTATE_90:
4065 return PLANE_CTL_ROTATE_270;
4066 case DRM_MODE_ROTATE_180:
4067 return PLANE_CTL_ROTATE_180;
4068 case DRM_MODE_ROTATE_270:
4069 return PLANE_CTL_ROTATE_90;
4070 default:
4071 MISSING_CASE(rotate);
4072 }
4073
4074 return 0;
4075 }
4076
4077 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4078 {
4079 switch (reflect) {
4080 case 0:
4081 break;
4082 case DRM_MODE_REFLECT_X:
4083 return PLANE_CTL_FLIP_HORIZONTAL;
4084 case DRM_MODE_REFLECT_Y:
4085 default:
4086 MISSING_CASE(reflect);
4087 }
4088
4089 return 0;
4090 }
4091
4092 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4093 {
4094 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4095 u32 plane_ctl = 0;
4096
4097 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4098 return plane_ctl;
4099
4100 if (crtc_state->gamma_enable)
4101 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4102
4103 if (crtc_state->csc_enable)
4104 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4105
4106 return plane_ctl;
4107 }
4108
4109 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4110 const struct intel_plane_state *plane_state)
4111 {
4112 struct drm_i915_private *dev_priv =
4113 to_i915(plane_state->base.plane->dev);
4114 const struct drm_framebuffer *fb = plane_state->base.fb;
4115 unsigned int rotation = plane_state->base.rotation;
4116 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4117 u32 plane_ctl;
4118
4119 plane_ctl = PLANE_CTL_ENABLE;
4120
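/*
 * On GLK and gen10+ the per-plane alpha, gamma and YUV CSC/range
 * controls live in PLANE_COLOR_CTL instead; see glk_plane_color_ctl().
 */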
4121 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4122 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4123 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4124
4125 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4126 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4127
4128 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4129 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4130 }
4131
4132 plane_ctl |= skl_plane_ctl_format(fb->format->format);
4133 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4134 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4135
4136 if (INTEL_GEN(dev_priv) >= 10)
4137 plane_ctl |= cnl_plane_ctl_flip(rotation &
4138 DRM_MODE_REFLECT_MASK);
4139
4140 if (key->flags & I915_SET_COLORKEY_DESTINATION)
4141 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4142 else if (key->flags & I915_SET_COLORKEY_SOURCE)
4143 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4144
4145 return plane_ctl;
4146 }
4147
4148 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4149 {
4150 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4151 u32 plane_color_ctl = 0;
4152
4153 if (INTEL_GEN(dev_priv) >= 11)
4154 return plane_color_ctl;
4155
4156 if (crtc_state->gamma_enable)
4157 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4158
4159 if (crtc_state->csc_enable)
4160 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4161
4162 return plane_color_ctl;
4163 }
4164
4165 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4166 const struct intel_plane_state *plane_state)
4167 {
4168 struct drm_i915_private *dev_priv =
4169 to_i915(plane_state->base.plane->dev);
4170 const struct drm_framebuffer *fb = plane_state->base.fb;
4171 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4172 u32 plane_color_ctl = 0;
4173
4174 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4175 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4176
4177 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4178 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4179 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4180 else
4181 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4182
4183 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4184 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4185 } else if (fb->format->is_yuv) {
4186 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4187 }
4188
4189 return plane_color_ctl;
4190 }
4191
4192 static int
4193 __intel_display_resume(struct drm_device *dev,
4194 struct drm_atomic_state *state,
4195 struct drm_modeset_acquire_ctx *ctx)
4196 {
4197 struct drm_crtc_state *crtc_state;
4198 struct drm_crtc *crtc;
4199 int i, ret;
4200
4201 intel_modeset_setup_hw_state(dev, ctx);
4202 i915_redisable_vga(to_i915(dev));
4203
4204 if (!state)
4205 return 0;
4206
4207 /*
4208 * We've duplicated the state; pointers to the old state are invalid.
4209 *
4210 * Don't attempt to use the old state until we commit the duplicated state.
4211 */
4212 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4213 /*
4214 * Force recalculation even if we restore
4215 * current state. With fast modeset this may not result
4216 * in a modeset when the state is compatible.
4217 */
4218 crtc_state->mode_changed = true;
4219 }
4220
4221 /* ignore any reset values/BIOS leftovers in the WM registers */
4222 if (!HAS_GMCH(to_i915(dev)))
4223 to_intel_atomic_state(state)->skip_intermediate_wm = true;
4224
4225 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4226
4227 WARN_ON(ret == -EDEADLK);
4228 return ret;
4229 }
4230
4231 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4232 {
4233 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4234 intel_has_gpu_reset(dev_priv));
4235 }
4236
4237 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4238 {
4239 struct drm_device *dev = &dev_priv->drm;
4240 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4241 struct drm_atomic_state *state;
4242 int ret;
4243
4244 /* reset doesn't touch the display */
4245 if (!i915_modparams.force_reset_modeset_test &&
4246 !gpu_reset_clobbers_display(dev_priv))
4247 return;
4248
4249 /* We have a modeset vs reset deadlock, defensively unbreak it. */
4250 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4251 wake_up_all(&dev_priv->gpu_error.wait_queue);
4252
4253 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4254 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4255 i915_gem_set_wedged(dev_priv);
4256 }
4257
4258 /*
4259 * Need mode_config.mutex so that we don't
4260 * trample ongoing ->detect() and whatnot.
4261 */
4262 mutex_lock(&dev->mode_config.mutex);
4263 drm_modeset_acquire_init(ctx, 0);
4264 while (1) {
4265 ret = drm_modeset_lock_all_ctx(dev, ctx);
4266 if (ret != -EDEADLK)
4267 break;
4268
4269 drm_modeset_backoff(ctx);
4270 }
4271 /*
4272 * Disabling the crtcs gracefully seems nicer. Also the
4273 * g33 docs say we should at least disable all the planes.
4274 */
4275 state = drm_atomic_helper_duplicate_state(dev, ctx);
4276 if (IS_ERR(state)) {
4277 ret = PTR_ERR(state);
4278 DRM_ERROR("Duplicating state failed with %i\n", ret);
4279 return;
4280 }
4281
4282 ret = drm_atomic_helper_disable_all(dev, ctx);
4283 if (ret) {
4284 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
4285 drm_atomic_state_put(state);
4286 return;
4287 }
4288
4289 dev_priv->modeset_restore_state = state;
4290 state->acquire_ctx = ctx;
4291 }
4292
4293 void intel_finish_reset(struct drm_i915_private *dev_priv)
4294 {
4295 struct drm_device *dev = &dev_priv->drm;
4296 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4297 struct drm_atomic_state *state;
4298 int ret;
4299
4300 /* reset doesn't touch the display */
4301 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
4302 return;
4303
4304 state = fetch_and_zero(&dev_priv->modeset_restore_state);
4305 if (!state)
4306 goto unlock;
4307
4308 /* reset doesn't touch the display */
4309 if (!gpu_reset_clobbers_display(dev_priv)) {
4310 /* for testing only restore the display */
4311 ret = __intel_display_resume(dev, state, ctx);
4312 if (ret)
4313 DRM_ERROR("Restoring old state failed with %i\n", ret);
4314 } else {
4315 /*
4316 * The display has been reset as well,
4317 * so need a full re-initialization.
4318 */
4319 intel_pps_unlock_regs_wa(dev_priv);
4320 intel_modeset_init_hw(dev);
4321 intel_init_clock_gating(dev_priv);
4322
4323 spin_lock_irq(&dev_priv->irq_lock);
4324 if (dev_priv->display.hpd_irq_setup)
4325 dev_priv->display.hpd_irq_setup(dev_priv);
4326 spin_unlock_irq(&dev_priv->irq_lock);
4327
4328 ret = __intel_display_resume(dev, state, ctx);
4329 if (ret)
4330 DRM_ERROR("Restoring old state failed with %i\n", ret);
4331
4332 intel_hpd_init(dev_priv);
4333 }
4334
4335 drm_atomic_state_put(state);
4336 unlock:
4337 drm_modeset_drop_locks(ctx);
4338 drm_modeset_acquire_fini(ctx);
4339 mutex_unlock(&dev->mode_config.mutex);
4340
4341 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4342 }
4343
4344 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4345 {
4346 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4347 enum pipe pipe = crtc->pipe;
4348 u32 tmp;
4349
4350 tmp = I915_READ(PIPE_CHICKEN(pipe));
4351
4352 /*
4353 * Display WA #1153: icl
4354 * enable hardware to bypass the alpha math
4355 * and rounding for per-pixel values 00 and 0xff
4356 */
4357 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4358 /*
4359 * Display WA #1605353570: icl
4360 * Set the pixel rounding bit to 1 to allow
4361 * passthrough of frame buffer pixels unmodified
4362 * across the pipe
4363 */
4364 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4365 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4366 }
4367
4368 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4369 const struct intel_crtc_state *new_crtc_state)
4370 {
4371 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
4372 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4373
4374 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
4375 crtc->base.mode = new_crtc_state->base.mode;
4376
4377 /*
4378 * Update pipe size and adjust fitter if needed: the reason for this is
4379 * that in compute_mode_changes we check the native mode (not the pfit
4380 * mode) to see if we can flip rather than do a full mode set. In the
4381 * fastboot case, we'll flip, but if we don't update the pipesrc and
4382 * pfit state, we'll end up with a big fb scanned out into the wrong
4383 * sized surface.
4384 */
4385
4386 I915_WRITE(PIPESRC(crtc->pipe),
4387 ((new_crtc_state->pipe_src_w - 1) << 16) |
4388 (new_crtc_state->pipe_src_h - 1));
4389
4390 /* on skylake this is done by detaching scalers */
4391 if (INTEL_GEN(dev_priv) >= 9) {
4392 skl_detach_scalers(new_crtc_state);
4393
4394 if (new_crtc_state->pch_pfit.enabled)
4395 skylake_pfit_enable(new_crtc_state);
4396 } else if (HAS_PCH_SPLIT(dev_priv)) {
4397 if (new_crtc_state->pch_pfit.enabled)
4398 ironlake_pfit_enable(new_crtc_state);
4399 else if (old_crtc_state->pch_pfit.enabled)
4400 ironlake_pfit_disable(old_crtc_state);
4401 }
4402
4403 if (INTEL_GEN(dev_priv) >= 11)
4404 icl_set_pipe_chicken(crtc);
4405 }
4406
4407 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4408 {
4409 struct drm_device *dev = crtc->base.dev;
4410 struct drm_i915_private *dev_priv = to_i915(dev);
4411 int pipe = crtc->pipe;
4412 i915_reg_t reg;
4413 u32 temp;
4414
4415 /* enable normal train */
4416 reg = FDI_TX_CTL(pipe);
4417 temp = I915_READ(reg);
4418 if (IS_IVYBRIDGE(dev_priv)) {
4419 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4420 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4421 } else {
4422 temp &= ~FDI_LINK_TRAIN_NONE;
4423 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4424 }
4425 I915_WRITE(reg, temp);
4426
4427 reg = FDI_RX_CTL(pipe);
4428 temp = I915_READ(reg);
4429 if (HAS_PCH_CPT(dev_priv)) {
4430 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4431 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4432 } else {
4433 temp &= ~FDI_LINK_TRAIN_NONE;
4434 temp |= FDI_LINK_TRAIN_NONE;
4435 }
4436 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4437
4438 /* wait one idle pattern time */
4439 POSTING_READ(reg);
4440 udelay(1000);
4441
4442 /* IVB wants error correction enabled */
4443 if (IS_IVYBRIDGE(dev_priv))
4444 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4445 FDI_FE_ERRC_ENABLE);
4446 }
4447
4448 /* The FDI link training functions for ILK/Ibex Peak. */
4449 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4450 const struct intel_crtc_state *crtc_state)
4451 {
4452 struct drm_device *dev = crtc->base.dev;
4453 struct drm_i915_private *dev_priv = to_i915(dev);
4454 int pipe = crtc->pipe;
4455 i915_reg_t reg;
4456 u32 temp, tries;
4457
4458 /* FDI needs bits from pipe first */
4459 assert_pipe_enabled(dev_priv, pipe);
4460
4461 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4462 * for the training result */
4463 reg = FDI_RX_IMR(pipe);
4464 temp = I915_READ(reg);
4465 temp &= ~FDI_RX_SYMBOL_LOCK;
4466 temp &= ~FDI_RX_BIT_LOCK;
4467 I915_WRITE(reg, temp);
4468 I915_READ(reg);
4469 udelay(150);
4470
4471 /* enable CPU FDI TX and PCH FDI RX */
4472 reg = FDI_TX_CTL(pipe);
4473 temp = I915_READ(reg);
4474 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4475 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4476 temp &= ~FDI_LINK_TRAIN_NONE;
4477 temp |= FDI_LINK_TRAIN_PATTERN_1;
4478 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4479
4480 reg = FDI_RX_CTL(pipe);
4481 temp = I915_READ(reg);
4482 temp &= ~FDI_LINK_TRAIN_NONE;
4483 temp |= FDI_LINK_TRAIN_PATTERN_1;
4484 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4485
4486 POSTING_READ(reg);
4487 udelay(150);
4488
4489 /* Ironlake workaround, enable clock pointer after FDI enable */
4490 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4491 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4492 FDI_RX_PHASE_SYNC_POINTER_EN);
4493
4494 reg = FDI_RX_IIR(pipe);
4495 for (tries = 0; tries < 5; tries++) {
4496 temp = I915_READ(reg);
4497 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4498
4499 if ((temp & FDI_RX_BIT_LOCK)) {
4500 DRM_DEBUG_KMS("FDI train 1 done.\n");
4501 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4502 break;
4503 }
4504 }
4505 if (tries == 5)
4506 DRM_ERROR("FDI train 1 fail!\n");
4507
4508 /* Train 2 */
4509 reg = FDI_TX_CTL(pipe);
4510 temp = I915_READ(reg);
4511 temp &= ~FDI_LINK_TRAIN_NONE;
4512 temp |= FDI_LINK_TRAIN_PATTERN_2;
4513 I915_WRITE(reg, temp);
4514
4515 reg = FDI_RX_CTL(pipe);
4516 temp = I915_READ(reg);
4517 temp &= ~FDI_LINK_TRAIN_NONE;
4518 temp |= FDI_LINK_TRAIN_PATTERN_2;
4519 I915_WRITE(reg, temp);
4520
4521 POSTING_READ(reg);
4522 udelay(150);
4523
4524 reg = FDI_RX_IIR(pipe);
4525 for (tries = 0; tries < 5; tries++) {
4526 temp = I915_READ(reg);
4527 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4528
4529 if (temp & FDI_RX_SYMBOL_LOCK) {
4530 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4531 DRM_DEBUG_KMS("FDI train 2 done.\n");
4532 break;
4533 }
4534 }
4535 if (tries == 5)
4536 DRM_ERROR("FDI train 2 fail!\n");
4537
4538 DRM_DEBUG_KMS("FDI train done\n");
4539
4540 }
4541
4542 static const int snb_b_fdi_train_param[] = {
4543 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4544 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4545 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4546 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4547 };
4548
4549 /* The FDI link training functions for SNB/Cougarpoint. */
4550 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4551 const struct intel_crtc_state *crtc_state)
4552 {
4553 struct drm_device *dev = crtc->base.dev;
4554 struct drm_i915_private *dev_priv = to_i915(dev);
4555 int pipe = crtc->pipe;
4556 i915_reg_t reg;
4557 u32 temp, i, retry;
4558
4559 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4560 * for the training result */
4561 reg = FDI_RX_IMR(pipe);
4562 temp = I915_READ(reg);
4563 temp &= ~FDI_RX_SYMBOL_LOCK;
4564 temp &= ~FDI_RX_BIT_LOCK;
4565 I915_WRITE(reg, temp);
4566
4567 POSTING_READ(reg);
4568 udelay(150);
4569
4570 /* enable CPU FDI TX and PCH FDI RX */
4571 reg = FDI_TX_CTL(pipe);
4572 temp = I915_READ(reg);
4573 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4574 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4575 temp &= ~FDI_LINK_TRAIN_NONE;
4576 temp |= FDI_LINK_TRAIN_PATTERN_1;
4577 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4578 /* SNB-B */
4579 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4580 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4581
4582 I915_WRITE(FDI_RX_MISC(pipe),
4583 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4584
4585 reg = FDI_RX_CTL(pipe);
4586 temp = I915_READ(reg);
4587 if (HAS_PCH_CPT(dev_priv)) {
4588 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4589 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4590 } else {
4591 temp &= ~FDI_LINK_TRAIN_NONE;
4592 temp |= FDI_LINK_TRAIN_PATTERN_1;
4593 }
4594 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4595
4596 POSTING_READ(reg);
4597 udelay(150);
4598
4599 for (i = 0; i < 4; i++) {
4600 reg = FDI_TX_CTL(pipe);
4601 temp = I915_READ(reg);
4602 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4603 temp |= snb_b_fdi_train_param[i];
4604 I915_WRITE(reg, temp);
4605
4606 POSTING_READ(reg);
4607 udelay(500);
4608
4609 for (retry = 0; retry < 5; retry++) {
4610 reg = FDI_RX_IIR(pipe);
4611 temp = I915_READ(reg);
4612 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4613 if (temp & FDI_RX_BIT_LOCK) {
4614 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4615 DRM_DEBUG_KMS("FDI train 1 done.\n");
4616 break;
4617 }
4618 udelay(50);
4619 }
4620 if (retry < 5)
4621 break;
4622 }
4623 if (i == 4)
4624 DRM_ERROR("FDI train 1 fail!\n");
4625
4626 /* Train 2 */
4627 reg = FDI_TX_CTL(pipe);
4628 temp = I915_READ(reg);
4629 temp &= ~FDI_LINK_TRAIN_NONE;
4630 temp |= FDI_LINK_TRAIN_PATTERN_2;
4631 if (IS_GEN(dev_priv, 6)) {
4632 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4633 /* SNB-B */
4634 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4635 }
4636 I915_WRITE(reg, temp);
4637
4638 reg = FDI_RX_CTL(pipe);
4639 temp = I915_READ(reg);
4640 if (HAS_PCH_CPT(dev_priv)) {
4641 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4642 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4643 } else {
4644 temp &= ~FDI_LINK_TRAIN_NONE;
4645 temp |= FDI_LINK_TRAIN_PATTERN_2;
4646 }
4647 I915_WRITE(reg, temp);
4648
4649 POSTING_READ(reg);
4650 udelay(150);
4651
4652 for (i = 0; i < 4; i++) {
4653 reg = FDI_TX_CTL(pipe);
4654 temp = I915_READ(reg);
4655 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4656 temp |= snb_b_fdi_train_param[i];
4657 I915_WRITE(reg, temp);
4658
4659 POSTING_READ(reg);
4660 udelay(500);
4661
4662 for (retry = 0; retry < 5; retry++) {
4663 reg = FDI_RX_IIR(pipe);
4664 temp = I915_READ(reg);
4665 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4666 if (temp & FDI_RX_SYMBOL_LOCK) {
4667 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4668 DRM_DEBUG_KMS("FDI train 2 done.\n");
4669 break;
4670 }
4671 udelay(50);
4672 }
4673 if (retry < 5)
4674 break;
4675 }
4676 if (i == 4)
4677 DRM_ERROR("FDI train 2 fail!\n");
4678
4679 DRM_DEBUG_KMS("FDI train done.\n");
4680 }
4681
4682 /* Manual link training for Ivy Bridge A0 parts */
4683 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4684 const struct intel_crtc_state *crtc_state)
4685 {
4686 struct drm_device *dev = crtc->base.dev;
4687 struct drm_i915_private *dev_priv = to_i915(dev);
4688 int pipe = crtc->pipe;
4689 i915_reg_t reg;
4690 u32 temp, i, j;
4691
4692 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4693 * for the training result */
4694 reg = FDI_RX_IMR(pipe);
4695 temp = I915_READ(reg);
4696 temp &= ~FDI_RX_SYMBOL_LOCK;
4697 temp &= ~FDI_RX_BIT_LOCK;
4698 I915_WRITE(reg, temp);
4699
4700 POSTING_READ(reg);
4701 udelay(150);
4702
4703 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4704 I915_READ(FDI_RX_IIR(pipe)));
4705
4706 /* Try each vswing and preemphasis setting twice before moving on */
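/* (j / 2 indexes snb_b_fdi_train_param, so each entry is tried for two consecutive values of j) */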
4707 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4708 /* disable first in case we need to retry */
4709 reg = FDI_TX_CTL(pipe);
4710 temp = I915_READ(reg);
4711 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4712 temp &= ~FDI_TX_ENABLE;
4713 I915_WRITE(reg, temp);
4714
4715 reg = FDI_RX_CTL(pipe);
4716 temp = I915_READ(reg);
4717 temp &= ~FDI_LINK_TRAIN_AUTO;
4718 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4719 temp &= ~FDI_RX_ENABLE;
4720 I915_WRITE(reg, temp);
4721
4722 /* enable CPU FDI TX and PCH FDI RX */
4723 reg = FDI_TX_CTL(pipe);
4724 temp = I915_READ(reg);
4725 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4726 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4727 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4728 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4729 temp |= snb_b_fdi_train_param[j/2];
4730 temp |= FDI_COMPOSITE_SYNC;
4731 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4732
4733 I915_WRITE(FDI_RX_MISC(pipe),
4734 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4735
4736 reg = FDI_RX_CTL(pipe);
4737 temp = I915_READ(reg);
4738 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4739 temp |= FDI_COMPOSITE_SYNC;
4740 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4741
4742 POSTING_READ(reg);
4743 udelay(1); /* should be 0.5us */
4744
4745 for (i = 0; i < 4; i++) {
4746 reg = FDI_RX_IIR(pipe);
4747 temp = I915_READ(reg);
4748 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4749
4750 if (temp & FDI_RX_BIT_LOCK ||
4751 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4752 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4753 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4754 i);
4755 break;
4756 }
4757 udelay(1); /* should be 0.5us */
4758 }
4759 if (i == 4) {
4760 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4761 continue;
4762 }
4763
4764 /* Train 2 */
4765 reg = FDI_TX_CTL(pipe);
4766 temp = I915_READ(reg);
4767 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4768 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4769 I915_WRITE(reg, temp);
4770
4771 reg = FDI_RX_CTL(pipe);
4772 temp = I915_READ(reg);
4773 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4774 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4775 I915_WRITE(reg, temp);
4776
4777 POSTING_READ(reg);
4778 udelay(2); /* should be 1.5us */
4779
4780 for (i = 0; i < 4; i++) {
4781 reg = FDI_RX_IIR(pipe);
4782 temp = I915_READ(reg);
4783 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4784
4785 if (temp & FDI_RX_SYMBOL_LOCK ||
4786 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4787 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4788 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4789 i);
4790 goto train_done;
4791 }
4792 udelay(2); /* should be 1.5us */
4793 }
4794 if (i == 4)
4795 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4796 }
4797
4798 train_done:
4799 DRM_DEBUG_KMS("FDI train done.\n");
4800 }
4801
4802 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4803 {
4804 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4805 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4806 int pipe = intel_crtc->pipe;
4807 i915_reg_t reg;
4808 u32 temp;
4809
4810 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4811 reg = FDI_RX_CTL(pipe);
4812 temp = I915_READ(reg);
4813 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4814 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4815 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4816 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4817
4818 POSTING_READ(reg);
4819 udelay(200);
4820
4821 /* Switch from Rawclk to PCDclk */
4822 temp = I915_READ(reg);
4823 I915_WRITE(reg, temp | FDI_PCDCLK);
4824
4825 POSTING_READ(reg);
4826 udelay(200);
4827
4828 /* Enable CPU FDI TX PLL, always on for Ironlake */
4829 reg = FDI_TX_CTL(pipe);
4830 temp = I915_READ(reg);
4831 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4832 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4833
4834 POSTING_READ(reg);
4835 udelay(100);
4836 }
4837 }
4838
4839 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4840 {
4841 struct drm_device *dev = intel_crtc->base.dev;
4842 struct drm_i915_private *dev_priv = to_i915(dev);
4843 int pipe = intel_crtc->pipe;
4844 i915_reg_t reg;
4845 u32 temp;
4846
4847 /* Switch from PCDclk to Rawclk */
4848 reg = FDI_RX_CTL(pipe);
4849 temp = I915_READ(reg);
4850 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4851
4852 /* Disable CPU FDI TX PLL */
4853 reg = FDI_TX_CTL(pipe);
4854 temp = I915_READ(reg);
4855 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4856
4857 POSTING_READ(reg);
4858 udelay(100);
4859
4860 reg = FDI_RX_CTL(pipe);
4861 temp = I915_READ(reg);
4862 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4863
4864 /* Wait for the clocks to turn off. */
4865 POSTING_READ(reg);
4866 udelay(100);
4867 }
4868
4869 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4870 {
4871 struct drm_device *dev = crtc->dev;
4872 struct drm_i915_private *dev_priv = to_i915(dev);
4873 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4874 int pipe = intel_crtc->pipe;
4875 i915_reg_t reg;
4876 u32 temp;
4877
4878 /* disable CPU FDI tx and PCH FDI rx */
4879 reg = FDI_TX_CTL(pipe);
4880 temp = I915_READ(reg);
4881 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4882 POSTING_READ(reg);
4883
4884 reg = FDI_RX_CTL(pipe);
4885 temp = I915_READ(reg);
4886 temp &= ~(0x7 << 16);
4887 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4888 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4889
4890 POSTING_READ(reg);
4891 udelay(100);
4892
4893 /* Ironlake workaround, disable clock pointer after disabling FDI */
4894 if (HAS_PCH_IBX(dev_priv))
4895 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4896
4897 /* still set train pattern 1 */
4898 reg = FDI_TX_CTL(pipe);
4899 temp = I915_READ(reg);
4900 temp &= ~FDI_LINK_TRAIN_NONE;
4901 temp |= FDI_LINK_TRAIN_PATTERN_1;
4902 I915_WRITE(reg, temp);
4903
4904 reg = FDI_RX_CTL(pipe);
4905 temp = I915_READ(reg);
4906 if (HAS_PCH_CPT(dev_priv)) {
4907 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4908 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4909 } else {
4910 temp &= ~FDI_LINK_TRAIN_NONE;
4911 temp |= FDI_LINK_TRAIN_PATTERN_1;
4912 }
4913 /* BPC in FDI rx is consistent with that in PIPECONF */
4914 temp &= ~(0x07 << 16);
4915 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4916 I915_WRITE(reg, temp);
4917
4918 POSTING_READ(reg);
4919 udelay(100);
4920 }
4921
4922 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4923 {
4924 struct drm_crtc *crtc;
4925 bool cleanup_done;
4926
4927 drm_for_each_crtc(crtc, &dev_priv->drm) {
4928 struct drm_crtc_commit *commit;
4929 spin_lock(&crtc->commit_lock);
4930 commit = list_first_entry_or_null(&crtc->commit_list,
4931 struct drm_crtc_commit, commit_entry);
4932 cleanup_done = commit ?
4933 try_wait_for_completion(&commit->cleanup_done) : true;
4934 spin_unlock(&crtc->commit_lock);
4935
4936 if (cleanup_done)
4937 continue;
4938
4939 drm_crtc_wait_one_vblank(crtc);
4940
4941 return true;
4942 }
4943
4944 return false;
4945 }
4946
4947 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4948 {
4949 u32 temp;
4950
4951 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4952
4953 mutex_lock(&dev_priv->sb_lock);
4954
4955 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4956 temp |= SBI_SSCCTL_DISABLE;
4957 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4958
4959 mutex_unlock(&dev_priv->sb_lock);
4960 }
4961
4962 /* Program iCLKIP clock to the desired frequency */
4963 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4964 {
4965 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4966 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4967 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4968 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4969 u32 temp;
4970
4971 lpt_disable_iclkip(dev_priv);
4972
4973 /* The iCLK virtual clock root frequency is in MHz,
4974 * but the adjusted_mode->crtc_clock is in kHz. To get the
4975 * divisors, it is necessary to divide one by the other, so we
4976 * convert the virtual clock root frequency to kHz here for
4977 * higher precision.
4978 */
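/*
 * Worked example (illustrative numbers): for a 108000 kHz pixel clock
 * and auxdiv == 0, desired_divisor = DIV_ROUND_CLOSEST(172800000,
 * 108000) = 1600, giving divsel = 1600 / 64 - 2 = 23 and
 * phaseinc = 1600 % 64 = 0. lpt_get_iclkip() reverses this:
 * (23 + 2) * 64 + 0 = 1600 maps back to 108000 kHz.
 */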
4979 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4980 u32 iclk_virtual_root_freq = 172800 * 1000;
4981 u32 iclk_pi_range = 64;
4982 u32 desired_divisor;
4983
4984 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4985 clock << auxdiv);
4986 divsel = (desired_divisor / iclk_pi_range) - 2;
4987 phaseinc = desired_divisor % iclk_pi_range;
4988
4989 /*
4990 * Near 20MHz is a corner case which is
4991 * out of range for the 7-bit divisor
4992 */
4993 if (divsel <= 0x7f)
4994 break;
4995 }
4996
4997 /* This should not happen with any sane values */
4998 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4999 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5000 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
5001 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5002
5003 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5004 clock,
5005 auxdiv,
5006 divsel,
5007 phasedir,
5008 phaseinc);
5009
5010 mutex_lock(&dev_priv->sb_lock);
5011
5012 /* Program SSCDIVINTPHASE6 */
5013 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5014 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5015 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5016 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5017 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5018 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5019 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5020 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5021
5022 /* Program SSCAUXDIV */
5023 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5024 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5025 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5026 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5027
5028 /* Enable modulator and associated divider */
5029 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5030 temp &= ~SBI_SSCCTL_DISABLE;
5031 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5032
5033 mutex_unlock(&dev_priv->sb_lock);
5034
5035 /* Wait for initialization time */
5036 udelay(24);
5037
5038 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5039 }
5040
5041 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5042 {
5043 u32 divsel, phaseinc, auxdiv;
5044 u32 iclk_virtual_root_freq = 172800 * 1000;
5045 u32 iclk_pi_range = 64;
5046 u32 desired_divisor;
5047 u32 temp;
5048
5049 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5050 return 0;
5051
5052 mutex_lock(&dev_priv->sb_lock);
5053
5054 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5055 if (temp & SBI_SSCCTL_DISABLE) {
5056 mutex_unlock(&dev_priv->sb_lock);
5057 return 0;
5058 }
5059
5060 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5061 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5062 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5063 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5064 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5065
5066 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5067 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5068 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5069
5070 mutex_unlock(&dev_priv->sb_lock);
5071
5072 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5073
5074 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5075 desired_divisor << auxdiv);
5076 }
5077
5078 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5079 enum pipe pch_transcoder)
5080 {
5081 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5082 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5083 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5084
5085 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5086 I915_READ(HTOTAL(cpu_transcoder)));
5087 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5088 I915_READ(HBLANK(cpu_transcoder)));
5089 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5090 I915_READ(HSYNC(cpu_transcoder)));
5091
5092 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5093 I915_READ(VTOTAL(cpu_transcoder)));
5094 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5095 I915_READ(VBLANK(cpu_transcoder)));
5096 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5097 I915_READ(VSYNC(cpu_transcoder)));
5098 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5099 I915_READ(VSYNCSHIFT(cpu_transcoder)));
5100 }
5101
5102 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5103 {
5104 u32 temp;
5105
5106 temp = I915_READ(SOUTH_CHICKEN1);
5107 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5108 return;
5109
5110 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5111 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5112
5113 temp &= ~FDI_BC_BIFURCATION_SELECT;
5114 if (enable)
5115 temp |= FDI_BC_BIFURCATION_SELECT;
5116
5117 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5118 I915_WRITE(SOUTH_CHICKEN1, temp);
5119 POSTING_READ(SOUTH_CHICKEN1);
5120 }
5121
5122 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5123 {
5124 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5126
5127 switch (crtc->pipe) {
5128 case PIPE_A:
5129 break;
5130 case PIPE_B:
5131 if (crtc_state->fdi_lanes > 2)
5132 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5133 else
5134 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5135
5136 break;
5137 case PIPE_C:
5138 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5139
5140 break;
5141 default:
5142 BUG();
5143 }
5144 }
5145
5146 /*
5147 * Finds the encoder associated with the given CRTC. This can only be
5148 * used when we know that the CRTC isn't feeding multiple encoders!
5149 */
5150 static struct intel_encoder *
5151 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5152 const struct intel_crtc_state *crtc_state)
5153 {
5154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5155 const struct drm_connector_state *connector_state;
5156 const struct drm_connector *connector;
5157 struct intel_encoder *encoder = NULL;
5158 int num_encoders = 0;
5159 int i;
5160
5161 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5162 if (connector_state->crtc != &crtc->base)
5163 continue;
5164
5165 encoder = to_intel_encoder(connector_state->best_encoder);
5166 num_encoders++;
5167 }
5168
5169 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5170 num_encoders, pipe_name(crtc->pipe));
5171
5172 return encoder;
5173 }
5174
5175 /*
5176 * Enable PCH resources required for PCH ports:
5177 * - PCH PLLs
5178 * - FDI training & RX/TX
5179 * - update transcoder timings
5180 * - DP transcoding bits
5181 * - transcoder
5182 */
5183 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5184 const struct intel_crtc_state *crtc_state)
5185 {
5186 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5187 struct drm_device *dev = crtc->base.dev;
5188 struct drm_i915_private *dev_priv = to_i915(dev);
5189 int pipe = crtc->pipe;
5190 u32 temp;
5191
5192 assert_pch_transcoder_disabled(dev_priv, pipe);
5193
5194 if (IS_IVYBRIDGE(dev_priv))
5195 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5196
5197 /* Write the TU size bits before fdi link training, so that error
5198 * detection works. */
5199 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5200 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5201
5202 /* For PCH output, training FDI link */
5203 dev_priv->display.fdi_link_train(crtc, crtc_state);
5204
5205 /* We need to program the right clock selection before writing the pixel
5206 * multiplier into the DPLL. */
5207 if (HAS_PCH_CPT(dev_priv)) {
5208 u32 sel;
5209
5210 temp = I915_READ(PCH_DPLL_SEL);
5211 temp |= TRANS_DPLL_ENABLE(pipe);
5212 sel = TRANS_DPLLB_SEL(pipe);
5213 if (crtc_state->shared_dpll ==
5214 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5215 temp |= sel;
5216 else
5217 temp &= ~sel;
5218 I915_WRITE(PCH_DPLL_SEL, temp);
5219 }
5220
5221 /* XXX: pch pll's can be enabled any time before we enable the PCH
5222 * transcoder, and we actually should do this to not upset any PCH
5223 * transcoder that already use the clock when we share it.
5224 *
5225 * Note that enable_shared_dpll tries to do the right thing, but
5226 * get_shared_dpll unconditionally resets the pll - we need that to have
5227 * the right LVDS enable sequence. */
5228 intel_enable_shared_dpll(crtc_state);
5229
5230 /* set transcoder timing, panel must allow it */
5231 assert_panel_unlocked(dev_priv, pipe);
5232 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5233
5234 intel_fdi_normal_train(crtc);
5235
5236 /* For PCH DP, enable TRANS_DP_CTL */
5237 if (HAS_PCH_CPT(dev_priv) &&
5238 intel_crtc_has_dp_encoder(crtc_state)) {
5239 const struct drm_display_mode *adjusted_mode =
5240 &crtc_state->base.adjusted_mode;
5241 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5242 i915_reg_t reg = TRANS_DP_CTL(pipe);
5243 enum port port;
5244
5245 temp = I915_READ(reg);
5246 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5247 TRANS_DP_SYNC_MASK |
5248 TRANS_DP_BPC_MASK);
5249 temp |= TRANS_DP_OUTPUT_ENABLE;
5250 temp |= bpc << 9; /* same format but at 11:9 */
5251
5252 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5253 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5254 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5255 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5256
5257 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5258 WARN_ON(port < PORT_B || port > PORT_D);
5259 temp |= TRANS_DP_PORT_SEL(port);
5260
5261 I915_WRITE(reg, temp);
5262 }
5263
5264 ironlake_enable_pch_transcoder(crtc_state);
5265 }
5266
5267 static void lpt_pch_enable(const struct intel_atomic_state *state,
5268 const struct intel_crtc_state *crtc_state)
5269 {
5270 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5271 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5272 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5273
5274 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5275
5276 lpt_program_iclkip(crtc_state);
5277
5278 /* Set transcoder timing. */
5279 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5280
5281 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5282 }
5283
5284 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
5285 {
5286 struct drm_i915_private *dev_priv = to_i915(dev);
5287 i915_reg_t dslreg = PIPEDSL(pipe);
5288 u32 temp;
5289
5290 temp = I915_READ(dslreg);
5291 udelay(500);
5292 if (wait_for(I915_READ(dslreg) != temp, 5)) {
5293 if (wait_for(I915_READ(dslreg) != temp, 5))
5294 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5295 }
5296 }
5297
5298 /*
5299 * The hardware phase 0.0 refers to the center of the pixel.
5300 * We want to start from the top/left edge, which is phase
5301 * -0.5. That matches how the hardware calculates the scaling
5302 * factors (from top-left of the first pixel to bottom-right
5303 * of the last pixel, as opposed to the pixel centers).
5304 *
5305 * For 4:2:0 subsampled chroma planes we obviously have to
5306 * adjust that so that the chroma sample position lands in
5307 * the right spot.
5308 *
5309 * Note that for packed YCbCr 4:2:2 formats there is no way to
5310 * control chroma siting. The hardware simply replicates the
5311 * chroma samples for both of the luma samples, and thus we don't
5312 * actually get the expected MPEG2 chroma siting convention :(
5313 * The same behaviour is observed on pre-SKL platforms as well.
5314 *
5315 * Theory behind the formula (note that we ignore sub-pixel
5316 * source coordinates):
5317 * s = source sample position
5318 * d = destination sample position
5319 *
5320 * Downscaling 4:1:
5321 * -0.5
5322 * | 0.0
5323 * | | 1.5 (initial phase)
5324 * | | |
5325 * v v v
5326 * | s | s | s | s |
5327 * | d |
5328 *
5329 * Upscaling 1:4:
5330 * -0.5
5331 * | -0.375 (initial phase)
5332 * | | 0.0
5333 * | | |
5334 * v v v
5335 * | s |
5336 * | d | d | d | d |
5337 */
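/*
 * Worked example of the above (assuming scale is the usual 16.16 fixed
 * point ratio, i.e. 0x10000 == 1.0, with phase in the same units): for
 * the luma plane (sub == 1) a 4:1 downscale (scale == 0x40000) gives an
 * initial phase of -0x8000 + 0x40000 / 2 = 0x18000, i.e. +1.5, and a
 * 1:4 upscale (scale == 0x4000) gives -0x8000 + 0x4000 / 2 = -0x6000,
 * i.e. -0.375, matching the two diagrams above.
 */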
5338 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5339 {
5340 int phase = -0x8000;
5341 u16 trip = 0;
5342
5343 if (chroma_cosited)
5344 phase += (sub - 1) * 0x8000 / sub;
5345
5346 phase += scale / (2 * sub);
5347
5348 /*
5349 * The hardware initial phase is limited to [-0.5:1.5].
5350 * Since the max hardware scale factor is 3.0, we
5351 * should never actually exceed 1.0 here.
5352 */
5353 WARN_ON(phase < -0x8000 || phase > 0x18000);
5354
5355 if (phase < 0)
5356 phase = 0x10000 + phase;
5357 else
5358 trip = PS_PHASE_TRIP;
5359
5360 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5361 }
5362
5363 #define SKL_MIN_SRC_W 8
5364 #define SKL_MAX_SRC_W 4096
5365 #define SKL_MIN_SRC_H 8
5366 #define SKL_MAX_SRC_H 4096
5367 #define SKL_MIN_DST_W 8
5368 #define SKL_MAX_DST_W 4096
5369 #define SKL_MIN_DST_H 8
5370 #define SKL_MAX_DST_H 4096
5371 #define ICL_MAX_SRC_W 5120
5372 #define ICL_MAX_SRC_H 4096
5373 #define ICL_MAX_DST_W 5120
5374 #define ICL_MAX_DST_H 4096
5375 #define SKL_MIN_YUV_420_SRC_W 16
5376 #define SKL_MIN_YUV_420_SRC_H 16
5377
5378 static int
5379 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5380 unsigned int scaler_user, int *scaler_id,
5381 int src_w, int src_h, int dst_w, int dst_h,
5382 const struct drm_format_info *format, bool need_scaler)
5383 {
5384 struct intel_crtc_scaler_state *scaler_state =
5385 &crtc_state->scaler_state;
5386 struct intel_crtc *intel_crtc =
5387 to_intel_crtc(crtc_state->base.crtc);
5388 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5389 const struct drm_display_mode *adjusted_mode =
5390 &crtc_state->base.adjusted_mode;
5391
5392 /*
5393 * Src coordinates are already rotated by 270 degrees for
5394 * the 90/270 degree plane rotation cases (to match the
5395 * GTT mapping), hence no need to account for rotation here.
5396 */
5397 if (src_w != dst_w || src_h != dst_h)
5398 need_scaler = true;
5399
5400 /*
5401 * Scaling/fitting not supported in IF-ID mode in GEN9+
5402 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5403 * Once NV12 is enabled, handle it here while allocating scaler
5404 * for NV12.
5405 */
5406 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5407 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5408 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5409 return -EINVAL;
5410 }
5411
5412 /*
5413 * If the plane is being disabled, the scaler is no longer required, or a
5414 * force detach was requested:
5415 * - free the scaler bound to this plane/crtc
5416 * - to do this, update crtc->scaler_usage
5417 *
5418 * Here the scaler state in crtc_state is set free so that the
5419 * scaler can be assigned to another user. The actual register
5420 * update to free the scaler is done in plane/panel-fit programming.
5421 * For this reason crtc/plane_state->scaler_id isn't reset here.
5421 */
5422 if (force_detach || !need_scaler) {
5423 if (*scaler_id >= 0) {
5424 scaler_state->scaler_users &= ~(1 << scaler_user);
5425 scaler_state->scalers[*scaler_id].in_use = 0;
5426
5427 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5428 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5429 intel_crtc->pipe, scaler_user, *scaler_id,
5430 scaler_state->scaler_users);
5431 *scaler_id = -1;
5432 }
5433 return 0;
5434 }
5435
5436 if (format && is_planar_yuv_format(format->format) &&
5437 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5438 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5439 return -EINVAL;
5440 }
5441
5442 /* range checks */
5443 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5444 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5445 (INTEL_GEN(dev_priv) >= 11 &&
5446 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5447 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5448 (INTEL_GEN(dev_priv) < 11 &&
5449 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5450 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5451 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5452 "size is out of scaler range\n",
5453 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5454 return -EINVAL;
5455 }
5456
5457 /* mark this plane as a scaler user in crtc_state */
5458 scaler_state->scaler_users |= (1 << scaler_user);
5459 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5460 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5461 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5462 scaler_state->scaler_users);
5463
5464 return 0;
5465 }
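
/*
 * An illustrative sketch, not wired into the driver: scaler_users is
 * a plain bitmask with one bit per potential user (planes by index,
 * plus the CRTC itself for pipe scaling/panel fitting). Staging a
 * scaling request sets the user's bit; staging a free clears it.
 */
static void __maybe_unused skl_scaler_users_example(void)
{
unsigned int scaler_users = 0;

scaler_users |= BIT(2); /* plane 2 stages a scaling request */
scaler_users |= BIT(SKL_CRTC_INDEX); /* pipe scaler/pfit user */
scaler_users &= ~BIT(2); /* plane 2 staged for freeing */

WARN_ON(scaler_users != BIT(SKL_CRTC_INDEX));
}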
5466
5467 /**
5468 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5469 *
5470 * @state: crtc state
5471 *
5472 * Return:
5473 * 0 - scaler_users updated successfully
5474 * error - requested scaling cannot be supported or other error condition
5475 */
5476 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5477 {
5478 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5479 bool need_scaler = false;
5480
5481 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5482 need_scaler = true;
5483
5484 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5485 &state->scaler_state.scaler_id,
5486 state->pipe_src_w, state->pipe_src_h,
5487 adjusted_mode->crtc_hdisplay,
5488 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5489 }
5490
5491 /**
5492 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5493 * @crtc_state: crtc state
5494 * @plane_state: atomic plane state to update
5495 *
5496 * Return:
5497 * 0 - scaler_users updated successfully
5498 * error - requested scaling cannot be supported or other error condition
5499 */
5500 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5501 struct intel_plane_state *plane_state)
5502 {
5503 struct intel_plane *intel_plane =
5504 to_intel_plane(plane_state->base.plane);
5505 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5506 struct drm_framebuffer *fb = plane_state->base.fb;
5507 int ret;
5508 bool force_detach = !fb || !plane_state->base.visible;
5509 bool need_scaler = false;
5510
5511 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5512 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5513 fb && is_planar_yuv_format(fb->format->format))
5514 need_scaler = true;
5515
5516 ret = skl_update_scaler(crtc_state, force_detach,
5517 drm_plane_index(&intel_plane->base),
5518 &plane_state->scaler_id,
5519 drm_rect_width(&plane_state->base.src) >> 16,
5520 drm_rect_height(&plane_state->base.src) >> 16,
5521 drm_rect_width(&plane_state->base.dst),
5522 drm_rect_height(&plane_state->base.dst),
5523 fb ? fb->format : NULL, need_scaler);
5524
5525 if (ret || plane_state->scaler_id < 0)
5526 return ret;
5527
5528 /* check colorkey */
5529 if (plane_state->ckey.flags) {
5530 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
5531 intel_plane->base.base.id,
5532 intel_plane->base.name);
5533 return -EINVAL;
5534 }
5535
5536 /* Check src format */
5537 switch (fb->format->format) {
5538 case DRM_FORMAT_RGB565:
5539 case DRM_FORMAT_XBGR8888:
5540 case DRM_FORMAT_XRGB8888:
5541 case DRM_FORMAT_ABGR8888:
5542 case DRM_FORMAT_ARGB8888:
5543 case DRM_FORMAT_XRGB2101010:
5544 case DRM_FORMAT_XBGR2101010:
5545 case DRM_FORMAT_XBGR16161616F:
5546 case DRM_FORMAT_ABGR16161616F:
5547 case DRM_FORMAT_XRGB16161616F:
5548 case DRM_FORMAT_ARGB16161616F:
5549 case DRM_FORMAT_YUYV:
5550 case DRM_FORMAT_YVYU:
5551 case DRM_FORMAT_UYVY:
5552 case DRM_FORMAT_VYUY:
5553 case DRM_FORMAT_NV12:
5554 case DRM_FORMAT_P010:
5555 case DRM_FORMAT_P012:
5556 case DRM_FORMAT_P016:
5557 case DRM_FORMAT_Y210:
5558 case DRM_FORMAT_Y212:
5559 case DRM_FORMAT_Y216:
5560 case DRM_FORMAT_XVYU2101010:
5561 case DRM_FORMAT_XVYU12_16161616:
5562 case DRM_FORMAT_XVYU16161616:
5563 break;
5564 default:
5565 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5566 intel_plane->base.base.id, intel_plane->base.name,
5567 fb->base.id, fb->format->format);
5568 return -EINVAL;
5569 }
5570
5571 return 0;
5572 }
5573
5574 static void skylake_scaler_disable(struct intel_crtc *crtc)
5575 {
5576 int i;
5577
5578 for (i = 0; i < crtc->num_scalers; i++)
5579 skl_detach_scaler(crtc, i);
5580 }
5581
5582 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5583 {
5584 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5585 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5586 enum pipe pipe = crtc->pipe;
5587 const struct intel_crtc_scaler_state *scaler_state =
5588 &crtc_state->scaler_state;
5589
5590 if (crtc_state->pch_pfit.enabled) {
5591 u16 uv_rgb_hphase, uv_rgb_vphase;
5592 int pfit_w, pfit_h, hscale, vscale;
5593 int id;
5594
5595 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5596 return;
5597
5598 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5599 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5600
5601 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5602 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5603
5604 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5605 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5606
5607 id = scaler_state->scaler_id;
5608 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5609 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5610 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5611 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5612 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5613 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5614 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5615 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5616 }
5617 }
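
/*
 * An illustrative sketch, not wired into the driver: the pfit scale
 * factors computed above are .16 fixed-point src/dst ratios, which
 * is exactly the format skl_scaler_calc_phase() expects. E.g.
 * fitting a 1280-wide source into a 1920-wide panel fitter window:
 */
static void __maybe_unused skl_pfit_scale_example(void)
{
int pipe_src_w = 1280, pfit_w = 1920;
int hscale = (pipe_src_w << 16) / pfit_w;

/* 1280/1920 = 2/3 upscale, i.e. ~0.6666 in .16 fixed point */
WARN_ON(hscale != 0xaaaa);
}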
5618
5619 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5620 {
5621 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5622 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5623 int pipe = crtc->pipe;
5624
5625 if (crtc_state->pch_pfit.enabled) {
5626 /* Force use of hard-coded filter coefficients
5627 * as some pre-programmed values are broken,
5628 * e.g. x201.
5629 */
5630 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5631 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5632 PF_PIPE_SEL_IVB(pipe));
5633 else
5634 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5635 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5636 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5637 }
5638 }
5639
5640 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5641 {
5642 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5643 struct drm_device *dev = crtc->base.dev;
5644 struct drm_i915_private *dev_priv = to_i915(dev);
5645
5646 if (!crtc_state->ips_enabled)
5647 return;
5648
5649 /*
5650 * We can only enable IPS after we enable a plane and wait for a vblank.
5651 * This function is called from post_plane_update, which is run after
5652 * a vblank wait.
5653 */
5654 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5655
5656 if (IS_BROADWELL(dev_priv)) {
5657 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5658 IPS_ENABLE | IPS_PCODE_CONTROL));
5659 /* Quoting Art Runyan: "it's not safe to expect any particular
5660 * value in IPS_CTL bit 31 after enabling IPS through the
5661 * mailbox." Moreover, the mailbox may return a bogus state,
5662 * so we need to just enable it and continue on.
5663 */
5664 } else {
5665 I915_WRITE(IPS_CTL, IPS_ENABLE);
5666 /* The bit only becomes 1 in the next vblank, so this wait here
5667 * is essentially intel_wait_for_vblank. If we don't have this
5668 * and don't wait for vblanks until the end of crtc_enable, then
5669 * the HW state readout code will complain that the expected
5670 * IPS_CTL value is not the one we read. */
5671 if (intel_wait_for_register(&dev_priv->uncore,
5672 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5673 50))
5674 DRM_ERROR("Timed out waiting for IPS enable\n");
5675 }
5676 }
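
/*
 * An illustrative sketch, not wired into the driver, of what the
 * bounded register poll in intel_wait_for_register() above boils
 * down to: re-read the register until the masked bits reach the
 * expected value or the timeout (milliseconds here) expires. The
 * real helper also does a short atomic pre-poll before sleeping.
 */
static bool __maybe_unused hsw_ips_poll_example(struct drm_i915_private *dev_priv,
unsigned int timeout_ms)
{
while (timeout_ms--) {
if ((I915_READ(IPS_CTL) & IPS_ENABLE) == IPS_ENABLE)
return true;
usleep_range(1000, 2000); /* ~1 ms between polls */
}
return false; /* caller logs the timeout */
}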
5677
5678 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5679 {
5680 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5681 struct drm_device *dev = crtc->base.dev;
5682 struct drm_i915_private *dev_priv = to_i915(dev);
5683
5684 if (!crtc_state->ips_enabled)
5685 return;
5686
5687 if (IS_BROADWELL(dev_priv)) {
5688 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5689 /*
5690 * Wait for PCODE to finish disabling IPS. The BSpec specified
5691 * 42ms timeout value leads to occasional timeouts so use 100ms
5692 * instead.
5693 */
5694 if (intel_wait_for_register(&dev_priv->uncore,
5695 IPS_CTL, IPS_ENABLE, 0,
5696 100))
5697 DRM_ERROR("Timed out waiting for IPS disable\n");
5698 } else {
5699 I915_WRITE(IPS_CTL, 0);
5700 POSTING_READ(IPS_CTL);
5701 }
5702
5703 /* We need to wait for a vblank before we can disable the plane. */
5704 intel_wait_for_vblank(dev_priv, crtc->pipe);
5705 }
5706
5707 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5708 {
5709 if (intel_crtc->overlay) {
5710 struct drm_device *dev = intel_crtc->base.dev;
5711
5712 mutex_lock(&dev->struct_mutex);
5713 (void) intel_overlay_switch_off(intel_crtc->overlay);
5714 mutex_unlock(&dev->struct_mutex);
5715 }
5716
5717 /* Let userspace switch the overlay on again. In most cases userspace
5718 * has to recompute where to put it anyway.
5719 */
5720 }
5721
5722 /**
5723 * intel_post_enable_primary - Perform operations after enabling primary plane
5724 * @crtc: the CRTC whose primary plane was just enabled
5725 * @new_crtc_state: the enabling state
5726 *
5727 * Performs potentially sleeping operations that must be done after the primary
5728 * plane is enabled, such as updating FBC and IPS. Note that this may be
5729 * called due to an explicit primary plane update, or due to an implicit
5730 * re-enable that is caused when a sprite plane is updated to no longer
5731 * completely hide the primary plane.
5732 */
5733 static void
5734 intel_post_enable_primary(struct drm_crtc *crtc,
5735 const struct intel_crtc_state *new_crtc_state)
5736 {
5737 struct drm_device *dev = crtc->dev;
5738 struct drm_i915_private *dev_priv = to_i915(dev);
5739 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5740 int pipe = intel_crtc->pipe;
5741
5742 /*
5743 * Gen2 reports pipe underruns whenever all planes are disabled.
5744 * So don't enable underrun reporting before at least some planes
5745 * are enabled.
5746 * FIXME: Need to fix the logic to work when we turn off all planes
5747 * but leave the pipe running.
5748 */
5749 if (IS_GEN(dev_priv, 2))
5750 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5751
5752 /* Underruns don't always raise interrupts, so check manually. */
5753 intel_check_cpu_fifo_underruns(dev_priv);
5754 intel_check_pch_fifo_underruns(dev_priv);
5755 }
5756
5757 /* FIXME get rid of this and use pre_plane_update */
5758 static void
5759 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5760 {
5761 struct drm_device *dev = crtc->dev;
5762 struct drm_i915_private *dev_priv = to_i915(dev);
5763 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5764 int pipe = intel_crtc->pipe;
5765
5766 /*
5767 * Gen2 reports pipe underruns whenever all planes are disabled.
5768 * So disable underrun reporting before all the planes get disabled.
5769 */
5770 if (IS_GEN(dev_priv, 2))
5771 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5772
5773 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5774
5775 /*
5776 * Vblank time updates from the shadow to live plane control register
5777 * are blocked if the memory self-refresh mode is active at that
5778 * moment. So to make sure the plane gets truly disabled, disable
5779 * first the self-refresh mode. The self-refresh enable bit in turn
5780 * will be checked/applied by the HW only at the next frame start
5781 * event which is after the vblank start event, so we need to have a
5782 * wait-for-vblank between disabling the plane and the pipe.
5783 */
5784 if (HAS_GMCH(dev_priv) &&
5785 intel_set_memory_cxsr(dev_priv, false))
5786 intel_wait_for_vblank(dev_priv, pipe);
5787 }
5788
5789 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5790 const struct intel_crtc_state *new_crtc_state)
5791 {
5792 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5793 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5794
5795 if (!old_crtc_state->ips_enabled)
5796 return false;
5797
5798 if (needs_modeset(&new_crtc_state->base))
5799 return true;
5800
5801 /*
5802 * Workaround: Do not read or write the pipe palette/gamma data while
5803 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5804 *
5805 * Disable IPS before we program the LUT.
5806 */
5807 if (IS_HASWELL(dev_priv) &&
5808 (new_crtc_state->base.color_mgmt_changed ||
5809 new_crtc_state->update_pipe) &&
5810 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5811 return true;
5812
5813 return !new_crtc_state->ips_enabled;
5814 }
5815
5816 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5817 const struct intel_crtc_state *new_crtc_state)
5818 {
5819 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5820 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5821
5822 if (!new_crtc_state->ips_enabled)
5823 return false;
5824
5825 if (needs_modeset(&new_crtc_state->base))
5826 return true;
5827
5828 /*
5829 * Workaround: Do not read or write the pipe palette/gamma data while
5830 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5831 *
5832 * Re-enable IPS after the LUT has been programmed.
5833 */
5834 if (IS_HASWELL(dev_priv) &&
5835 (new_crtc_state->base.color_mgmt_changed ||
5836 new_crtc_state->update_pipe) &&
5837 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5838 return true;
5839
5840 /*
5841 * We can't read out IPS on broadwell, assume the worst and
5842 * forcibly enable IPS on the first fastset.
5843 */
5844 if (new_crtc_state->update_pipe &&
5845 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5846 return true;
5847
5848 return !old_crtc_state->ips_enabled;
5849 }
5850
5851 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5852 const struct intel_crtc_state *crtc_state)
5853 {
5854 if (!crtc_state->nv12_planes)
5855 return false;
5856
5857 /* WA Display #0827: Gen9:all */
5858 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5859 return true;
5860
5861 return false;
5862 }
5863
5864 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5865 const struct intel_crtc_state *crtc_state)
5866 {
5867 /* Wa_2006604312:icl */
5868 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5869 return true;
5870
5871 return false;
5872 }
5873
5874 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5875 {
5876 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5877 struct drm_device *dev = crtc->base.dev;
5878 struct drm_i915_private *dev_priv = to_i915(dev);
5879 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5880 struct intel_crtc_state *pipe_config =
5881 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5882 crtc);
5883 struct drm_plane *primary = crtc->base.primary;
5884 struct drm_plane_state *old_primary_state =
5885 drm_atomic_get_old_plane_state(old_state, primary);
5886
5887 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5888
5889 if (pipe_config->update_wm_post && pipe_config->base.active)
5890 intel_update_watermarks(crtc);
5891
5892 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5893 hsw_enable_ips(pipe_config);
5894
5895 if (old_primary_state) {
5896 struct drm_plane_state *new_primary_state =
5897 drm_atomic_get_new_plane_state(old_state, primary);
5898
5899 intel_fbc_post_update(crtc);
5900
5901 if (new_primary_state->visible &&
5902 (needs_modeset(&pipe_config->base) ||
5903 !old_primary_state->visible))
5904 intel_post_enable_primary(&crtc->base, pipe_config);
5905 }
5906
5907 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5908 !needs_nv12_wa(dev_priv, pipe_config))
5909 skl_wa_827(dev_priv, crtc->pipe, false);
5910
5911 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5912 !needs_scalerclk_wa(dev_priv, pipe_config))
5913 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5914 }
5915
5916 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5917 struct intel_crtc_state *pipe_config)
5918 {
5919 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5920 struct drm_device *dev = crtc->base.dev;
5921 struct drm_i915_private *dev_priv = to_i915(dev);
5922 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5923 struct drm_plane *primary = crtc->base.primary;
5924 struct drm_plane_state *old_primary_state =
5925 drm_atomic_get_old_plane_state(old_state, primary);
5926 bool modeset = needs_modeset(&pipe_config->base);
5927 struct intel_atomic_state *old_intel_state =
5928 to_intel_atomic_state(old_state);
5929
5930 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5931 hsw_disable_ips(old_crtc_state);
5932
5933 if (old_primary_state) {
5934 struct intel_plane_state *new_primary_state =
5935 intel_atomic_get_new_plane_state(old_intel_state,
5936 to_intel_plane(primary));
5937
5938 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5939 /*
5940 * Gen2 reports pipe underruns whenever all planes are disabled.
5941 * So disable underrun reporting before all the planes get disabled.
5942 */
5943 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5944 (modeset || !new_primary_state->base.visible))
5945 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5946 }
5947
5948 /* Display WA 827 */
5949 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5950 needs_nv12_wa(dev_priv, pipe_config))
5951 skl_wa_827(dev_priv, crtc->pipe, true);
5952
5953 /* Wa_2006604312:icl */
5954 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5955 needs_scalerclk_wa(dev_priv, pipe_config))
5956 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
5957
5958 /*
5959 * Vblank time updates from the shadow to live plane control register
5960 * are blocked if the memory self-refresh mode is active at that
5961 * moment. So to make sure the plane gets truly disabled, disable
5962 * first the self-refresh mode. The self-refresh enable bit in turn
5963 * will be checked/applied by the HW only at the next frame start
5964 * event which is after the vblank start event, so we need to have a
5965 * wait-for-vblank between disabling the plane and the pipe.
5966 */
5967 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5968 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5969 intel_wait_for_vblank(dev_priv, crtc->pipe);
5970
5971 /*
5972 * IVB workaround: must disable low power watermarks for at least
5973 * one frame before enabling scaling. LP watermarks can be re-enabled
5974 * when scaling is disabled.
5975 *
5976 * WaCxSRDisabledForSpriteScaling:ivb
5977 */
5978 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5979 old_crtc_state->base.active)
5980 intel_wait_for_vblank(dev_priv, crtc->pipe);
5981
5982 /*
5983 * If we're doing a modeset, we're done. No need to do any pre-vblank
5984 * watermark programming here.
5985 */
5986 if (needs_modeset(&pipe_config->base))
5987 return;
5988
5989 /*
5990 * For platforms that support atomic watermarks, program the
5991 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5992 * will be the intermediate values that are safe for both pre- and
5993 * post- vblank; when vblank happens, the 'active' values will be set
5994 * to the final 'target' values and we'll do this again to get the
5995 * optimal watermarks. For gen9+ platforms, the values we program here
5996 * will be the final target values which will get automatically latched
5997 * at vblank time; no further programming will be necessary.
5998 *
5999 * If a platform hasn't been transitioned to atomic watermarks yet,
6000 * we'll continue to update watermarks the old way, if flags tell
6001 * us to.
6002 */
6003 if (dev_priv->display.initial_watermarks != NULL)
6004 dev_priv->display.initial_watermarks(old_intel_state,
6005 pipe_config);
6006 else if (pipe_config->update_wm_pre)
6007 intel_update_watermarks(crtc);
6008 }
6009
6010 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6011 struct intel_crtc *crtc)
6012 {
6013 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6014 const struct intel_crtc_state *new_crtc_state =
6015 intel_atomic_get_new_crtc_state(state, crtc);
6016 unsigned int update_mask = new_crtc_state->update_planes;
6017 const struct intel_plane_state *old_plane_state;
6018 struct intel_plane *plane;
6019 unsigned int fb_bits = 0;
6020 int i;
6021
6022 intel_crtc_dpms_overlay_disable(crtc);
6023
6024 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6025 if (crtc->pipe != plane->pipe ||
6026 !(update_mask & BIT(plane->id)))
6027 continue;
6028
6029 intel_disable_plane(plane, new_crtc_state);
6030
6031 if (old_plane_state->base.visible)
6032 fb_bits |= plane->frontbuffer_bit;
6033 }
6034
6035 intel_frontbuffer_flip(dev_priv, fb_bits);
6036 }
6037
6038 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
6039 struct intel_crtc_state *crtc_state,
6040 struct drm_atomic_state *old_state)
6041 {
6042 struct drm_connector_state *conn_state;
6043 struct drm_connector *conn;
6044 int i;
6045
6046 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6047 struct intel_encoder *encoder =
6048 to_intel_encoder(conn_state->best_encoder);
6049
6050 if (conn_state->crtc != crtc)
6051 continue;
6052
6053 if (encoder->pre_pll_enable)
6054 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6055 }
6056 }
6057
6058 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
6059 struct intel_crtc_state *crtc_state,
6060 struct drm_atomic_state *old_state)
6061 {
6062 struct drm_connector_state *conn_state;
6063 struct drm_connector *conn;
6064 int i;
6065
6066 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6067 struct intel_encoder *encoder =
6068 to_intel_encoder(conn_state->best_encoder);
6069
6070 if (conn_state->crtc != crtc)
6071 continue;
6072
6073 if (encoder->pre_enable)
6074 encoder->pre_enable(encoder, crtc_state, conn_state);
6075 }
6076 }
6077
6078 static void intel_encoders_enable(struct drm_crtc *crtc,
6079 struct intel_crtc_state *crtc_state,
6080 struct drm_atomic_state *old_state)
6081 {
6082 struct drm_connector_state *conn_state;
6083 struct drm_connector *conn;
6084 int i;
6085
6086 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6087 struct intel_encoder *encoder =
6088 to_intel_encoder(conn_state->best_encoder);
6089
6090 if (conn_state->crtc != crtc)
6091 continue;
6092
6093 if (encoder->enable)
6094 encoder->enable(encoder, crtc_state, conn_state);
6095 intel_opregion_notify_encoder(encoder, true);
6096 }
6097 }
6098
6099 static void intel_encoders_disable(struct drm_crtc *crtc,
6100 struct intel_crtc_state *old_crtc_state,
6101 struct drm_atomic_state *old_state)
6102 {
6103 struct drm_connector_state *old_conn_state;
6104 struct drm_connector *conn;
6105 int i;
6106
6107 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6108 struct intel_encoder *encoder =
6109 to_intel_encoder(old_conn_state->best_encoder);
6110
6111 if (old_conn_state->crtc != crtc)
6112 continue;
6113
6114 intel_opregion_notify_encoder(encoder, false);
6115 if (encoder->disable)
6116 encoder->disable(encoder, old_crtc_state, old_conn_state);
6117 }
6118 }
6119
6120 static void intel_encoders_post_disable(struct drm_crtc *crtc,
6121 struct intel_crtc_state *old_crtc_state,
6122 struct drm_atomic_state *old_state)
6123 {
6124 struct drm_connector_state *old_conn_state;
6125 struct drm_connector *conn;
6126 int i;
6127
6128 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6129 struct intel_encoder *encoder =
6130 to_intel_encoder(old_conn_state->best_encoder);
6131
6132 if (old_conn_state->crtc != crtc)
6133 continue;
6134
6135 if (encoder->post_disable)
6136 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6137 }
6138 }
6139
6140 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
6141 struct intel_crtc_state *old_crtc_state,
6142 struct drm_atomic_state *old_state)
6143 {
6144 struct drm_connector_state *old_conn_state;
6145 struct drm_connector *conn;
6146 int i;
6147
6148 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6149 struct intel_encoder *encoder =
6150 to_intel_encoder(old_conn_state->best_encoder);
6151
6152 if (old_conn_state->crtc != crtc)
6153 continue;
6154
6155 if (encoder->post_pll_disable)
6156 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6157 }
6158 }
6159
6160 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
6161 struct intel_crtc_state *crtc_state,
6162 struct drm_atomic_state *old_state)
6163 {
6164 struct drm_connector_state *conn_state;
6165 struct drm_connector *conn;
6166 int i;
6167
6168 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6169 struct intel_encoder *encoder =
6170 to_intel_encoder(conn_state->best_encoder);
6171
6172 if (conn_state->crtc != crtc)
6173 continue;
6174
6175 if (encoder->update_pipe)
6176 encoder->update_pipe(encoder, crtc_state, conn_state);
6177 }
6178 }
6179
6180 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6181 {
6182 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6183 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6184
6185 plane->disable_plane(plane, crtc_state);
6186 }
6187
6188 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6189 struct drm_atomic_state *old_state)
6190 {
6191 struct drm_crtc *crtc = pipe_config->base.crtc;
6192 struct drm_device *dev = crtc->dev;
6193 struct drm_i915_private *dev_priv = to_i915(dev);
6194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6195 int pipe = intel_crtc->pipe;
6196 struct intel_atomic_state *old_intel_state =
6197 to_intel_atomic_state(old_state);
6198
6199 if (WARN_ON(intel_crtc->active))
6200 return;
6201
6202 /*
6203 * Sometimes spurious CPU pipe underruns happen during FDI
6204 * training, at least with VGA+HDMI cloning. Suppress them.
6205 *
6206 * On ILK we get occasional spurious CPU pipe underruns
6207 * between eDP port A enable and vdd enable. Also PCH port
6208 * enable seems to result in the occasional CPU pipe underrun.
6209 *
6210 * Spurious PCH underruns also occur during PCH enabling.
6211 */
6212 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6213 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6214
6215 if (pipe_config->has_pch_encoder)
6216 intel_prepare_shared_dpll(pipe_config);
6217
6218 if (intel_crtc_has_dp_encoder(pipe_config))
6219 intel_dp_set_m_n(pipe_config, M1_N1);
6220
6221 intel_set_pipe_timings(pipe_config);
6222 intel_set_pipe_src_size(pipe_config);
6223
6224 if (pipe_config->has_pch_encoder) {
6225 intel_cpu_transcoder_set_m_n(pipe_config,
6226 &pipe_config->fdi_m_n, NULL);
6227 }
6228
6229 ironlake_set_pipeconf(pipe_config);
6230
6231 intel_crtc->active = true;
6232
6233 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6234
6235 if (pipe_config->has_pch_encoder) {
6236 /* Note: FDI PLL enabling _must_ be done before we enable the
6237 * cpu pipes, hence this is separate from all the other fdi/pch
6238 * enabling. */
6239 ironlake_fdi_pll_enable(pipe_config);
6240 } else {
6241 assert_fdi_tx_disabled(dev_priv, pipe);
6242 assert_fdi_rx_disabled(dev_priv, pipe);
6243 }
6244
6245 ironlake_pfit_enable(pipe_config);
6246
6247 /*
6248 * On ILK+ LUT must be loaded before the pipe is running but with
6249 * clocks enabled
6250 */
6251 intel_color_load_luts(pipe_config);
6252 intel_color_commit(pipe_config);
6253 /* update DSPCNTR to configure gamma for pipe bottom color */
6254 intel_disable_primary_plane(pipe_config);
6255
6256 if (dev_priv->display.initial_watermarks != NULL)
6257 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6258 intel_enable_pipe(pipe_config);
6259
6260 if (pipe_config->has_pch_encoder)
6261 ironlake_pch_enable(old_intel_state, pipe_config);
6262
6263 assert_vblank_disabled(crtc);
6264 intel_crtc_vblank_on(pipe_config);
6265
6266 intel_encoders_enable(crtc, pipe_config, old_state);
6267
6268 if (HAS_PCH_CPT(dev_priv))
6269 cpt_verify_modeset(dev, intel_crtc->pipe);
6270
6271 /*
6272 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6273 * And a second vblank wait is needed at least on ILK with
6274 * some interlaced HDMI modes. Let's do the double wait always
6275 * in case there are more corner cases we don't know about.
6276 */
6277 if (pipe_config->has_pch_encoder) {
6278 intel_wait_for_vblank(dev_priv, pipe);
6279 intel_wait_for_vblank(dev_priv, pipe);
6280 }
6281 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6282 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6283 }
6284
6285 /* IPS only exists on ULT machines and is tied to pipe A. */
6286 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6287 {
6288 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6289 }
6290
6291 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6292 enum pipe pipe, bool apply)
6293 {
6294 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6295 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6296
6297 if (apply)
6298 val |= mask;
6299 else
6300 val &= ~mask;
6301
6302 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6303 }
6304
6305 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6306 {
6307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6308 enum pipe pipe = crtc->pipe;
6309 u32 val;
6310
6311 val = MBUS_DBOX_A_CREDIT(2);
6312 val |= MBUS_DBOX_BW_CREDIT(1);
6313 val |= MBUS_DBOX_B_CREDIT(8);
6314
6315 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6316 }
6317
6318 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6319 struct drm_atomic_state *old_state)
6320 {
6321 struct drm_crtc *crtc = pipe_config->base.crtc;
6322 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6323 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6324 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6325 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6326 struct intel_atomic_state *old_intel_state =
6327 to_intel_atomic_state(old_state);
6328 bool psl_clkgate_wa;
6329
6330 if (WARN_ON(intel_crtc->active))
6331 return;
6332
6333 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6334
6335 if (pipe_config->shared_dpll)
6336 intel_enable_shared_dpll(pipe_config);
6337
6338 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6339
6340 if (intel_crtc_has_dp_encoder(pipe_config))
6341 intel_dp_set_m_n(pipe_config, M1_N1);
6342
6343 if (!transcoder_is_dsi(cpu_transcoder))
6344 intel_set_pipe_timings(pipe_config);
6345
6346 intel_set_pipe_src_size(pipe_config);
6347
6348 if (cpu_transcoder != TRANSCODER_EDP &&
6349 !transcoder_is_dsi(cpu_transcoder)) {
6350 I915_WRITE(PIPE_MULT(cpu_transcoder),
6351 pipe_config->pixel_multiplier - 1);
6352 }
6353
6354 if (pipe_config->has_pch_encoder) {
6355 intel_cpu_transcoder_set_m_n(pipe_config,
6356 &pipe_config->fdi_m_n, NULL);
6357 }
6358
6359 if (!transcoder_is_dsi(cpu_transcoder))
6360 haswell_set_pipeconf(pipe_config);
6361
6362 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6363 bdw_set_pipemisc(pipe_config);
6364
6365 intel_crtc->active = true;
6366
6367 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6368 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6369 pipe_config->pch_pfit.enabled;
6370 if (psl_clkgate_wa)
6371 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6372
6373 if (INTEL_GEN(dev_priv) >= 9)
6374 skylake_pfit_enable(pipe_config);
6375 else
6376 ironlake_pfit_enable(pipe_config);
6377
6378 /*
6379 * On ILK+ LUT must be loaded before the pipe is running but with
6380 * clocks enabled
6381 */
6382 intel_color_load_luts(pipe_config);
6383 intel_color_commit(pipe_config);
6384 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6385 if (INTEL_GEN(dev_priv) < 9)
6386 intel_disable_primary_plane(pipe_config);
6387
6388 if (INTEL_GEN(dev_priv) >= 11)
6389 icl_set_pipe_chicken(intel_crtc);
6390
6391 intel_ddi_set_pipe_settings(pipe_config);
6392 if (!transcoder_is_dsi(cpu_transcoder))
6393 intel_ddi_enable_transcoder_func(pipe_config);
6394
6395 if (dev_priv->display.initial_watermarks != NULL)
6396 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6397
6398 if (INTEL_GEN(dev_priv) >= 11)
6399 icl_pipe_mbus_enable(intel_crtc);
6400
6401 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6402 if (!transcoder_is_dsi(cpu_transcoder))
6403 intel_enable_pipe(pipe_config);
6404
6405 if (pipe_config->has_pch_encoder)
6406 lpt_pch_enable(old_intel_state, pipe_config);
6407
6408 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6409 intel_ddi_set_vc_payload_alloc(pipe_config, true);
6410
6411 assert_vblank_disabled(crtc);
6412 intel_crtc_vblank_on(pipe_config);
6413
6414 intel_encoders_enable(crtc, pipe_config, old_state);
6415
6416 if (psl_clkgate_wa) {
6417 intel_wait_for_vblank(dev_priv, pipe);
6418 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6419 }
6420
6421 /* If we change the relative order between pipe/planes enabling, we need
6422 * to change the workaround. */
6423 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6424 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6425 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6426 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6427 }
6428 }
6429
6430 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6431 {
6432 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6434 enum pipe pipe = crtc->pipe;
6435
6436 /* To avoid upsetting the power well on haswell, only disable the pfit if
6437 * it's in use. The hw state code will make sure we get this right. */
6438 if (old_crtc_state->pch_pfit.enabled) {
6439 I915_WRITE(PF_CTL(pipe), 0);
6440 I915_WRITE(PF_WIN_POS(pipe), 0);
6441 I915_WRITE(PF_WIN_SZ(pipe), 0);
6442 }
6443 }
6444
6445 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6446 struct drm_atomic_state *old_state)
6447 {
6448 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6449 struct drm_device *dev = crtc->dev;
6450 struct drm_i915_private *dev_priv = to_i915(dev);
6451 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6452 int pipe = intel_crtc->pipe;
6453
6454 /*
6455 * Sometimes spurious CPU pipe underruns happen when the
6456 * pipe is already disabled, but FDI RX/TX is still enabled.
6457 * Happens at least with VGA+HDMI cloning. Suppress them.
6458 */
6459 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6460 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6461
6462 intel_encoders_disable(crtc, old_crtc_state, old_state);
6463
6464 drm_crtc_vblank_off(crtc);
6465 assert_vblank_disabled(crtc);
6466
6467 intel_disable_pipe(old_crtc_state);
6468
6469 ironlake_pfit_disable(old_crtc_state);
6470
6471 if (old_crtc_state->has_pch_encoder)
6472 ironlake_fdi_disable(crtc);
6473
6474 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6475
6476 if (old_crtc_state->has_pch_encoder) {
6477 ironlake_disable_pch_transcoder(dev_priv, pipe);
6478
6479 if (HAS_PCH_CPT(dev_priv)) {
6480 i915_reg_t reg;
6481 u32 temp;
6482
6483 /* disable TRANS_DP_CTL */
6484 reg = TRANS_DP_CTL(pipe);
6485 temp = I915_READ(reg);
6486 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6487 TRANS_DP_PORT_SEL_MASK);
6488 temp |= TRANS_DP_PORT_SEL_NONE;
6489 I915_WRITE(reg, temp);
6490
6491 /* disable DPLL_SEL */
6492 temp = I915_READ(PCH_DPLL_SEL);
6493 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6494 I915_WRITE(PCH_DPLL_SEL, temp);
6495 }
6496
6497 ironlake_fdi_pll_disable(intel_crtc);
6498 }
6499
6500 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6501 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6502 }
6503
6504 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6505 struct drm_atomic_state *old_state)
6506 {
6507 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6508 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6509 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6510 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6511
6512 intel_encoders_disable(crtc, old_crtc_state, old_state);
6513
6514 drm_crtc_vblank_off(crtc);
6515 assert_vblank_disabled(crtc);
6516
6517 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6518 if (!transcoder_is_dsi(cpu_transcoder))
6519 intel_disable_pipe(old_crtc_state);
6520
6521 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6522 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6523
6524 if (!transcoder_is_dsi(cpu_transcoder))
6525 intel_ddi_disable_transcoder_func(old_crtc_state);
6526
6527 intel_dsc_disable(old_crtc_state);
6528
6529 if (INTEL_GEN(dev_priv) >= 9)
6530 skylake_scaler_disable(intel_crtc);
6531 else
6532 ironlake_pfit_disable(old_crtc_state);
6533
6534 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6535
6536 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6537 }
6538
6539 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6540 {
6541 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6542 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6543
6544 if (!crtc_state->gmch_pfit.control)
6545 return;
6546
6547 /*
6548 * The panel fitter should only be adjusted whilst the pipe is disabled,
6549 * according to the register description and the PRM.
6550 */
6551 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6552 assert_pipe_disabled(dev_priv, crtc->pipe);
6553
6554 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6555 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6556
6557 /* Border color in case we don't scale up to the full screen. Black by
6558 * default, change to something else for debugging. */
6559 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6560 }
6561
6562 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6563 {
6564 if (port == PORT_NONE)
6565 return false;
6566
6567 if (IS_ELKHARTLAKE(dev_priv))
6568 return port <= PORT_C;
6569
6570 if (INTEL_GEN(dev_priv) >= 11)
6571 return port <= PORT_B;
6572
6573 return false;
6574 }
6575
6576 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6577 {
6578 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6579 return port >= PORT_C && port <= PORT_F;
6580
6581 return false;
6582 }
6583
6584 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6585 {
6586 if (!intel_port_is_tc(dev_priv, port))
6587 return PORT_TC_NONE;
6588
6589 return port - PORT_C;
6590 }
6591
6592 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6593 {
6594 switch (port) {
6595 case PORT_A:
6596 return POWER_DOMAIN_PORT_DDI_A_LANES;
6597 case PORT_B:
6598 return POWER_DOMAIN_PORT_DDI_B_LANES;
6599 case PORT_C:
6600 return POWER_DOMAIN_PORT_DDI_C_LANES;
6601 case PORT_D:
6602 return POWER_DOMAIN_PORT_DDI_D_LANES;
6603 case PORT_E:
6604 return POWER_DOMAIN_PORT_DDI_E_LANES;
6605 case PORT_F:
6606 return POWER_DOMAIN_PORT_DDI_F_LANES;
6607 default:
6608 MISSING_CASE(port);
6609 return POWER_DOMAIN_PORT_OTHER;
6610 }
6611 }
6612
6613 enum intel_display_power_domain
6614 intel_aux_power_domain(struct intel_digital_port *dig_port)
6615 {
6616 switch (dig_port->aux_ch) {
6617 case AUX_CH_A:
6618 return POWER_DOMAIN_AUX_A;
6619 case AUX_CH_B:
6620 return POWER_DOMAIN_AUX_B;
6621 case AUX_CH_C:
6622 return POWER_DOMAIN_AUX_C;
6623 case AUX_CH_D:
6624 return POWER_DOMAIN_AUX_D;
6625 case AUX_CH_E:
6626 return POWER_DOMAIN_AUX_E;
6627 case AUX_CH_F:
6628 return POWER_DOMAIN_AUX_F;
6629 default:
6630 MISSING_CASE(dig_port->aux_ch);
6631 return POWER_DOMAIN_AUX_A;
6632 }
6633 }
6634
6635 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6636 struct intel_crtc_state *crtc_state)
6637 {
6638 struct drm_device *dev = crtc->dev;
6639 struct drm_i915_private *dev_priv = to_i915(dev);
6640 struct drm_encoder *encoder;
6641 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6642 enum pipe pipe = intel_crtc->pipe;
6643 u64 mask;
6644 enum transcoder transcoder = crtc_state->cpu_transcoder;
6645
6646 if (!crtc_state->base.active)
6647 return 0;
6648
6649 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6650 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6651 if (crtc_state->pch_pfit.enabled ||
6652 crtc_state->pch_pfit.force_thru)
6653 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6654
6655 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6656 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6657
6658 mask |= BIT_ULL(intel_encoder->power_domain);
6659 }
6660
6661 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6662 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6663
6664 if (crtc_state->shared_dpll)
6665 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6666
6667 return mask;
6668 }
6669
6670 static u64
6671 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6672 struct intel_crtc_state *crtc_state)
6673 {
6674 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6675 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6676 enum intel_display_power_domain domain;
6677 u64 domains, new_domains, old_domains;
6678
6679 old_domains = intel_crtc->enabled_power_domains;
6680 intel_crtc->enabled_power_domains = new_domains =
6681 get_crtc_power_domains(crtc, crtc_state);
6682
6683 domains = new_domains & ~old_domains;
6684
6685 for_each_power_domain(domain, domains)
6686 intel_display_power_get(dev_priv, domain);
6687
6688 return old_domains & ~new_domains;
6689 }
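
/*
 * An illustrative sketch, not wired into the driver: the get/put
 * dance above reduces to two bitmask differences. Domains in
 * new & ~old are grabbed before the modeset and domains in
 * old & ~new are released after it, so any domain needed by both
 * the old and new state stays powered throughout.
 */
static void __maybe_unused power_domain_diff_example(void)
{
u64 old_domains = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(3);
u64 new_domains = BIT_ULL(0) | BIT_ULL(1);

WARN_ON((new_domains & ~old_domains) != 0); /* nothing new to get */
WARN_ON((old_domains & ~new_domains) != BIT_ULL(3)); /* put bit 3 */
}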
6690
6691 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6692 u64 domains)
6693 {
6694 enum intel_display_power_domain domain;
6695
6696 for_each_power_domain(domain, domains)
6697 intel_display_power_put_unchecked(dev_priv, domain);
6698 }
6699
6700 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6701 struct drm_atomic_state *old_state)
6702 {
6703 struct intel_atomic_state *old_intel_state =
6704 to_intel_atomic_state(old_state);
6705 struct drm_crtc *crtc = pipe_config->base.crtc;
6706 struct drm_device *dev = crtc->dev;
6707 struct drm_i915_private *dev_priv = to_i915(dev);
6708 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6709 int pipe = intel_crtc->pipe;
6710
6711 if (WARN_ON(intel_crtc->active))
6712 return;
6713
6714 if (intel_crtc_has_dp_encoder(pipe_config))
6715 intel_dp_set_m_n(pipe_config, M1_N1);
6716
6717 intel_set_pipe_timings(pipe_config);
6718 intel_set_pipe_src_size(pipe_config);
6719
6720 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6721 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6722 I915_WRITE(CHV_CANVAS(pipe), 0);
6723 }
6724
6725 i9xx_set_pipeconf(pipe_config);
6726
6727 intel_crtc->active = true;
6728
6729 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6730
6731 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6732
6733 if (IS_CHERRYVIEW(dev_priv)) {
6734 chv_prepare_pll(intel_crtc, pipe_config);
6735 chv_enable_pll(intel_crtc, pipe_config);
6736 } else {
6737 vlv_prepare_pll(intel_crtc, pipe_config);
6738 vlv_enable_pll(intel_crtc, pipe_config);
6739 }
6740
6741 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6742
6743 i9xx_pfit_enable(pipe_config);
6744
6745 intel_color_load_luts(pipe_config);
6746 intel_color_commit(pipe_config);
6747 /* update DSPCNTR to configure gamma for pipe bottom color */
6748 intel_disable_primary_plane(pipe_config);
6749
6750 dev_priv->display.initial_watermarks(old_intel_state,
6751 pipe_config);
6752 intel_enable_pipe(pipe_config);
6753
6754 assert_vblank_disabled(crtc);
6755 intel_crtc_vblank_on(pipe_config);
6756
6757 intel_encoders_enable(crtc, pipe_config, old_state);
6758 }
6759
6760 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6761 {
6762 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6763 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6764
6765 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6766 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6767 }
6768
6769 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6770 struct drm_atomic_state *old_state)
6771 {
6772 struct intel_atomic_state *old_intel_state =
6773 to_intel_atomic_state(old_state);
6774 struct drm_crtc *crtc = pipe_config->base.crtc;
6775 struct drm_device *dev = crtc->dev;
6776 struct drm_i915_private *dev_priv = to_i915(dev);
6777 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6778 enum pipe pipe = intel_crtc->pipe;
6779
6780 if (WARN_ON(intel_crtc->active))
6781 return;
6782
6783 i9xx_set_pll_dividers(pipe_config);
6784
6785 if (intel_crtc_has_dp_encoder(pipe_config))
6786 intel_dp_set_m_n(pipe_config, M1_N1);
6787
6788 intel_set_pipe_timings(pipe_config);
6789 intel_set_pipe_src_size(pipe_config);
6790
6791 i9xx_set_pipeconf(pipe_config);
6792
6793 intel_crtc->active = true;
6794
6795 if (!IS_GEN(dev_priv, 2))
6796 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6797
6798 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6799
6800 i9xx_enable_pll(intel_crtc, pipe_config);
6801
6802 i9xx_pfit_enable(pipe_config);
6803
6804 intel_color_load_luts(pipe_config);
6805 intel_color_commit(pipe_config);
6806 /* update DSPCNTR to configure gamma for pipe bottom color */
6807 intel_disable_primary_plane(pipe_config);
6808
6809 if (dev_priv->display.initial_watermarks != NULL)
6810 dev_priv->display.initial_watermarks(old_intel_state,
6811 pipe_config);
6812 else
6813 intel_update_watermarks(intel_crtc);
6814 intel_enable_pipe(pipe_config);
6815
6816 assert_vblank_disabled(crtc);
6817 intel_crtc_vblank_on(pipe_config);
6818
6819 intel_encoders_enable(crtc, pipe_config, old_state);
6820 }
6821
6822 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6823 {
6824 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6825 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6826
6827 if (!old_crtc_state->gmch_pfit.control)
6828 return;
6829
6830 assert_pipe_disabled(dev_priv, crtc->pipe);
6831
6832 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6833 I915_READ(PFIT_CONTROL));
6834 I915_WRITE(PFIT_CONTROL, 0);
6835 }
6836
6837 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6838 struct drm_atomic_state *old_state)
6839 {
6840 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6841 struct drm_device *dev = crtc->dev;
6842 struct drm_i915_private *dev_priv = to_i915(dev);
6843 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6844 int pipe = intel_crtc->pipe;
6845
6846 /*
6847 * On gen2 planes are double buffered but the pipe isn't, so we must
6848 * wait for planes to fully turn off before disabling the pipe.
6849 */
6850 if (IS_GEN(dev_priv, 2))
6851 intel_wait_for_vblank(dev_priv, pipe);
6852
6853 intel_encoders_disable(crtc, old_crtc_state, old_state);
6854
6855 drm_crtc_vblank_off(crtc);
6856 assert_vblank_disabled(crtc);
6857
6858 intel_disable_pipe(old_crtc_state);
6859
6860 i9xx_pfit_disable(old_crtc_state);
6861
6862 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6863
6864 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6865 if (IS_CHERRYVIEW(dev_priv))
6866 chv_disable_pll(dev_priv, pipe);
6867 else if (IS_VALLEYVIEW(dev_priv))
6868 vlv_disable_pll(dev_priv, pipe);
6869 else
6870 i9xx_disable_pll(old_crtc_state);
6871 }
6872
6873 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6874
6875 if (!IS_GEN(dev_priv, 2))
6876 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6877
6878 if (!dev_priv->display.initial_watermarks)
6879 intel_update_watermarks(intel_crtc);
6880
6881 /* clock the pipe down to 640x480@60 to potentially save power */
6882 if (IS_I830(dev_priv))
6883 i830_enable_pipe(dev_priv, pipe);
6884 }
6885
6886 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6887 struct drm_modeset_acquire_ctx *ctx)
6888 {
6889 struct intel_encoder *encoder;
6890 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6891 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6892 struct intel_bw_state *bw_state =
6893 to_intel_bw_state(dev_priv->bw_obj.state);
6894 enum intel_display_power_domain domain;
6895 struct intel_plane *plane;
6896 u64 domains;
6897 struct drm_atomic_state *state;
6898 struct intel_crtc_state *crtc_state;
6899 int ret;
6900
6901 if (!intel_crtc->active)
6902 return;
6903
6904 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6905 const struct intel_plane_state *plane_state =
6906 to_intel_plane_state(plane->base.state);
6907
6908 if (plane_state->base.visible)
6909 intel_plane_disable_noatomic(intel_crtc, plane);
6910 }
6911
6912 state = drm_atomic_state_alloc(crtc->dev);
6913 if (!state) {
6914 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n",
6915 crtc->base.id, crtc->name);
6916 return;
6917 }
6918
6919 state->acquire_ctx = ctx;
6920
6921 /* Everything's already locked, -EDEADLK can't happen. */
6922 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6923 ret = drm_atomic_add_affected_connectors(state, crtc);
6924
6925 WARN_ON(IS_ERR(crtc_state) || ret);
6926
6927 dev_priv->display.crtc_disable(crtc_state, state);
6928
6929 drm_atomic_state_put(state);
6930
6931 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6932 crtc->base.id, crtc->name);
6933
6934 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6935 crtc->state->active = false;
6936 intel_crtc->active = false;
6937 crtc->enabled = false;
6938 crtc->state->connector_mask = 0;
6939 crtc->state->encoder_mask = 0;
6940
6941 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6942 encoder->base.crtc = NULL;
6943
6944 intel_fbc_disable(intel_crtc);
6945 intel_update_watermarks(intel_crtc);
6946 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6947
6948 domains = intel_crtc->enabled_power_domains;
6949 for_each_power_domain(domain, domains)
6950 intel_display_power_put_unchecked(dev_priv, domain);
6951 intel_crtc->enabled_power_domains = 0;
6952
6953 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6954 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6955 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6956
6957 bw_state->data_rate[intel_crtc->pipe] = 0;
6958 bw_state->num_active_planes[intel_crtc->pipe] = 0;
6959 }
6960
6961 /*
6962 * Turn all crtcs off, but do not adjust state.
6963 * This has to be paired with a call to intel_modeset_setup_hw_state.
6964 */
6965 int intel_display_suspend(struct drm_device *dev)
6966 {
6967 struct drm_i915_private *dev_priv = to_i915(dev);
6968 struct drm_atomic_state *state;
6969 int ret;
6970
6971 state = drm_atomic_helper_suspend(dev);
6972 ret = PTR_ERR_OR_ZERO(state);
6973 if (ret)
6974 DRM_ERROR("Suspending crtcs failed with %i\n", ret);
6975 else
6976 dev_priv->modeset_restore_state = state;
6977 return ret;
6978 }
6979
6980 void intel_encoder_destroy(struct drm_encoder *encoder)
6981 {
6982 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6983
6984 drm_encoder_cleanup(encoder);
6985 kfree(intel_encoder);
6986 }
6987
6988 /* Cross check the actual hw state with our own modeset state tracking (and its
6989 * internal consistency). */
6990 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6991 struct drm_connector_state *conn_state)
6992 {
6993 struct intel_connector *connector = to_intel_connector(conn_state->connector);
6994
6995 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6996 connector->base.base.id,
6997 connector->base.name);
6998
6999 if (connector->get_hw_state(connector)) {
7000 struct intel_encoder *encoder = connector->encoder;
7001
7002 I915_STATE_WARN(!crtc_state,
7003 "connector enabled without attached crtc\n");
7004
7005 if (!crtc_state)
7006 return;
7007
7008 I915_STATE_WARN(!crtc_state->active,
7009 "connector is active, but attached crtc isn't\n");
7010
7011 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7012 return;
7013
7014 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7015 "atomic encoder doesn't match attached encoder\n");
7016
7017 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7018 "attached encoder crtc differs from connector crtc\n");
7019 } else {
7020 I915_STATE_WARN(crtc_state && crtc_state->active,
7021 "attached crtc is active, but connector isn't\n");
7022 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7023 "best encoder set without crtc!\n");
7024 }
7025 }
7026
7027 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7028 {
7029 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7030 return crtc_state->fdi_lanes;
7031
7032 return 0;
7033 }
7034
7035 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7036 struct intel_crtc_state *pipe_config)
7037 {
7038 struct drm_i915_private *dev_priv = to_i915(dev);
7039 struct drm_atomic_state *state = pipe_config->base.state;
7040 struct intel_crtc *other_crtc;
7041 struct intel_crtc_state *other_crtc_state;
7042
7043 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7044 pipe_name(pipe), pipe_config->fdi_lanes);
7045 if (pipe_config->fdi_lanes > 4) {
7046 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7047 pipe_name(pipe), pipe_config->fdi_lanes);
7048 return -EINVAL;
7049 }
7050
7051 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7052 if (pipe_config->fdi_lanes > 2) {
7053 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7054 pipe_config->fdi_lanes);
7055 return -EINVAL;
7056 } else {
7057 return 0;
7058 }
7059 }
7060
7061 if (INTEL_INFO(dev_priv)->num_pipes == 2)
7062 return 0;
7063
7064 /* Ivybridge 3 pipe is really complicated */
7065 switch (pipe) {
7066 case PIPE_A:
7067 return 0;
7068 case PIPE_B:
7069 if (pipe_config->fdi_lanes <= 2)
7070 return 0;
7071
7072 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7073 other_crtc_state =
7074 intel_atomic_get_crtc_state(state, other_crtc);
7075 if (IS_ERR(other_crtc_state))
7076 return PTR_ERR(other_crtc_state);
7077
7078 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7079 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7080 pipe_name(pipe), pipe_config->fdi_lanes);
7081 return -EINVAL;
7082 }
7083 return 0;
7084 case PIPE_C:
7085 if (pipe_config->fdi_lanes > 2) {
7086 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7087 pipe_name(pipe), pipe_config->fdi_lanes);
7088 return -EINVAL;
7089 }
7090
7091 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7092 other_crtc_state =
7093 intel_atomic_get_crtc_state(state, other_crtc);
7094 if (IS_ERR(other_crtc_state))
7095 return PTR_ERR(other_crtc_state);
7096
7097 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7098 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
7099 return -EINVAL;
7100 }
7101 return 0;
7102 default:
7103 BUG();
7104 }
7105 }
7106
7107 #define RETRY 1
7108 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7109 struct intel_crtc_state *pipe_config)
7110 {
7111 struct drm_device *dev = intel_crtc->base.dev;
7112 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7113 int lane, link_bw, fdi_dotclock, ret;
7114 bool needs_recompute = false;
7115
7116 retry:
7117 /* FDI is a binary signal running at ~2.7 GHz, encoding
7118 * each output octet as 10 bits. The actual frequency
7119 * is stored as a divider into a 100 MHz clock, and the
7120 * mode pixel clock is stored in units of 1 kHz.
7121 * Hence the bw of each lane in terms of the mode signal
7122 * is:
7123 */
7124 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7125
7126 fdi_dotclock = adjusted_mode->crtc_clock;
7127
7128 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7129 pipe_config->pipe_bpp);
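/*
 * The lane count above works out to roughly
 * DIV_ROUND_UP(dotclock * bpp, link_bw * 8), with a small
 * spread-spectrum margin folded in by the helper.
 */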
7130
7131 pipe_config->fdi_lanes = lane;
7132
7133 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7134 link_bw, &pipe_config->fdi_m_n, false);
7135
7136 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7137 if (ret == -EDEADLK)
7138 return ret;
7139
7140 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
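/* drop 2 bits per component, e.g. 30 bpp -> 24 bpp -> 18 bpp */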
7141 pipe_config->pipe_bpp -= 2*3;
7142 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7143 pipe_config->pipe_bpp);
7144 needs_recompute = true;
7145 pipe_config->bw_constrained = true;
7146
7147 goto retry;
7148 }
7149
7150 if (needs_recompute)
7151 return RETRY;
7152
7153 return ret;
7154 }
7155
7156 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7157 {
7158 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7159 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7160
7161 /* IPS only exists on ULT machines and is tied to pipe A. */
7162 if (!hsw_crtc_supports_ips(crtc))
7163 return false;
7164
7165 if (!i915_modparams.enable_ips)
7166 return false;
7167
7168 if (crtc_state->pipe_bpp > 24)
7169 return false;
7170
7171 /*
7172 * We compare against the max cdclk, which means we must take
7173 * the increased cdclk requirement into account when
7174 * calculating the new cdclk.
7175 *
7176 * Should measure whether using a lower cdclk w/o IPS would be better.
7177 */
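/*
 * Illustrative numbers (assumed, not from Bspec): with a
 * max_cdclk_freq of 675000 kHz, any pixel_rate above
 * 675000 * 95 / 100 = 641250 kHz disqualifies IPS here.
 */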
7178 if (IS_BROADWELL(dev_priv) &&
7179 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7180 return false;
7181
7182 return true;
7183 }
7184
7185 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7186 {
7187 struct drm_i915_private *dev_priv =
7188 to_i915(crtc_state->base.crtc->dev);
7189 struct intel_atomic_state *intel_state =
7190 to_intel_atomic_state(crtc_state->base.state);
7191
7192 if (!hsw_crtc_state_ips_capable(crtc_state))
7193 return false;
7194
7195 /*
7196 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7197 * enabled and disabled dynamically based on package C states,
7198 * user space can't make reliable use of the CRCs, so let's just
7199 * completely disable it.
7200 */
7201 if (crtc_state->crc_enabled)
7202 return false;
7203
7204 /* IPS should be fine as long as at least one plane is enabled. */
7205 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7206 return false;
7207
7208 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7209 if (IS_BROADWELL(dev_priv) &&
7210 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7211 return false;
7212
7213 return true;
7214 }
7215
7216 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7217 {
7218 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7219
7220 /* GDG double wide on either pipe, otherwise pipe A only */
7221 return INTEL_GEN(dev_priv) < 4 &&
7222 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7223 }
7224
7225 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7226 {
7227 u32 pixel_rate;
7228
7229 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7230
7231 /*
7232 * We only use IF-ID interlacing. If we ever use
7233 * PF-ID we'll need to adjust the pixel_rate here.
7234 */
7235
7236 if (pipe_config->pch_pfit.enabled) {
7237 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7238 u32 pfit_size = pipe_config->pch_pfit.size;
7239
7240 pipe_w = pipe_config->pipe_src_w;
7241 pipe_h = pipe_config->pipe_src_h;
7242
7243 pfit_w = (pfit_size >> 16) & 0xFFFF;
7244 pfit_h = pfit_size & 0xFFFF;
7245 if (pipe_w < pfit_w)
7246 pipe_w = pfit_w;
7247 if (pipe_h < pfit_h)
7248 pipe_h = pfit_h;
7249
7250 if (WARN_ON(!pfit_w || !pfit_h))
7251 return pixel_rate;
7252
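/*
 * e.g. a 3840x2160 pipe source downscaled into a 1920x1080
 * pfit window quadruples the effective pixel rate.
 */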
7253 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7254 pfit_w * pfit_h);
7255 }
7256
7257 return pixel_rate;
7258 }
7259
7260 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7261 {
7262 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7263
7264 if (HAS_GMCH(dev_priv))
7265 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7266 crtc_state->pixel_rate =
7267 crtc_state->base.adjusted_mode.crtc_clock;
7268 else
7269 crtc_state->pixel_rate =
7270 ilk_pipe_pixel_rate(crtc_state);
7271 }
7272
7273 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7274 struct intel_crtc_state *pipe_config)
7275 {
7276 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7277 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7278 int clock_limit = dev_priv->max_dotclk_freq;
7279
7280 if (INTEL_GEN(dev_priv) < 4) {
7281 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7282
7283 /*
7284 * Enable double wide mode when the dot clock
7285 * is > 90% of the (display) core speed.
7286 */
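/*
 * e.g. (assumed numbers) a 320 MHz max cdclk gives a
 * 320 * 9 / 10 = 288 MHz single wide limit, so a 300 MHz
 * dotclock mode needs double wide here.
 */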
7287 if (intel_crtc_supports_double_wide(crtc) &&
7288 adjusted_mode->crtc_clock > clock_limit) {
7289 clock_limit = dev_priv->max_dotclk_freq;
7290 pipe_config->double_wide = true;
7291 }
7292 }
7293
7294 if (adjusted_mode->crtc_clock > clock_limit) {
7295 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7296 adjusted_mode->crtc_clock, clock_limit,
7297 yesno(pipe_config->double_wide));
7298 return -EINVAL;
7299 }
7300
7301 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7302 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7303 pipe_config->base.ctm) {
7304 /*
7305 * There is only one pipe CSC unit per pipe, and we need that
7306 * for output conversion from RGB->YCBCR. So if CTM is already
7307 * applied we can't support YCBCR420 output.
7308 */
7309 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7310 return -EINVAL;
7311 }
7312
7313 /*
7314 * Pipe horizontal size must be even in:
7315 * - DVO ganged mode
7316 * - LVDS dual channel mode
7317 * - Double wide pipe
7318 */
7319 if (pipe_config->pipe_src_w & 1) {
7320 if (pipe_config->double_wide) {
7321 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7322 return -EINVAL;
7323 }
7324
7325 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7326 intel_is_dual_link_lvds(dev_priv)) {
7327 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7328 return -EINVAL;
7329 }
7330 }
7331
7332 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7333 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7334 */
7335 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7336 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7337 return -EINVAL;
7338
7339 intel_crtc_compute_pixel_rate(pipe_config);
7340
7341 if (pipe_config->has_pch_encoder)
7342 return ironlake_fdi_compute_config(crtc, pipe_config);
7343
7344 return 0;
7345 }
7346
7347 static void
7348 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7349 {
7350 while (*num > DATA_LINK_M_N_MASK ||
7351 *den > DATA_LINK_M_N_MASK) {
7352 *num >>= 1;
7353 *den >>= 1;
7354 }
7355 }
7356
7357 static void compute_m_n(unsigned int m, unsigned int n,
7358 u32 *ret_m, u32 *ret_n,
7359 bool constant_n)
7360 {
7361 /*
7362 * Several DP dongles in particular seem to be fussy about
7363 * too large link M/N values. Use an N value of 0x8000, which
7364 * should be acceptable to such devices. 0x8000 is the
7365 * specified fixed N value for asynchronous clock mode,
7366 * which the devices also expect in synchronous clock mode.
7367 */
7368 if (constant_n)
7369 *ret_n = 0x8000;
7370 else
7371 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7372
7373 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7374 intel_reduce_m_n_ratio(ret_m, ret_n);
7375 }
7376
7377 void
7378 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7379 int pixel_clock, int link_clock,
7380 struct intel_link_m_n *m_n,
7381 bool constant_n)
7382 {
7383 m_n->tu = 64;
7384
7385 compute_m_n(bits_per_pixel * pixel_clock,
7386 link_clock * nlanes * 8,
7387 &m_n->gmch_m, &m_n->gmch_n,
7388 constant_n);
7389
7390 compute_m_n(pixel_clock, link_clock,
7391 &m_n->link_m, &m_n->link_n,
7392 constant_n);
7393 }
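/*
 * Worked example for intel_link_compute_m_n() (assumed link
 * parameters): 24 bpp over 4 lanes at a 148500 kHz pixel clock
 * and a 270000 kHz link clock gives a data ratio of
 * 24 * 148500 / (270000 * 4 * 8) = 0.4125, so with the constant
 * N of 0x8000 the data M/N pair becomes 13516/32768.
 */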
7394
7395 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7396 {
7397 if (i915_modparams.panel_use_ssc >= 0)
7398 return i915_modparams.panel_use_ssc != 0;
7399 return dev_priv->vbt.lvds_use_ssc
7400 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7401 }
7402
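/* The helper below encodes N one-hot, e.g. n == 3 -> (1 << 3) << 16 = 0x80000 */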
7403 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7404 {
7405 return (1 << dpll->n) << 16 | dpll->m2;
7406 }
7407
7408 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7409 {
7410 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7411 }
7412
7413 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7414 struct intel_crtc_state *crtc_state,
7415 struct dpll *reduced_clock)
7416 {
7417 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7418 u32 fp, fp2 = 0;
7419
7420 if (IS_PINEVIEW(dev_priv)) {
7421 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7422 if (reduced_clock)
7423 fp2 = pnv_dpll_compute_fp(reduced_clock);
7424 } else {
7425 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7426 if (reduced_clock)
7427 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7428 }
7429
7430 crtc_state->dpll_hw_state.fp0 = fp;
7431
7432 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7433 reduced_clock) {
7434 crtc_state->dpll_hw_state.fp1 = fp2;
7435 } else {
7436 crtc_state->dpll_hw_state.fp1 = fp;
7437 }
7438 }
7439
7440 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7441 pipe)
7442 {
7443 u32 reg_val;
7444
7445 /*
7446 * The PLLB opamp always calibrates to the max value of 0x3f; force
7447 * enable it and set it to a reasonable value instead.
7448 */
7449 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7450 reg_val &= 0xffffff00;
7451 reg_val |= 0x00000030;
7452 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7453
7454 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7455 reg_val &= 0x00ffffff;
7456 reg_val |= 0x8c000000;
7457 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7458
7459 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7460 reg_val &= 0xffffff00;
7461 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7462
7463 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7464 reg_val &= 0x00ffffff;
7465 reg_val |= 0xb0000000;
7466 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7467 }
7468
7469 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7470 const struct intel_link_m_n *m_n)
7471 {
7472 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7473 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7474 enum pipe pipe = crtc->pipe;
7475
7476 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7477 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7478 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7479 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7480 }
7481
7482 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7483 enum transcoder transcoder)
7484 {
7485 if (IS_HASWELL(dev_priv))
7486 return transcoder == TRANSCODER_EDP;
7487
7488 /*
7489 * Strictly speaking some registers are available before
7490 * gen7, but we only support DRRS on gen7+
7491 */
7492 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7493 }
7494
7495 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7496 const struct intel_link_m_n *m_n,
7497 const struct intel_link_m_n *m2_n2)
7498 {
7499 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7501 enum pipe pipe = crtc->pipe;
7502 enum transcoder transcoder = crtc_state->cpu_transcoder;
7503
7504 if (INTEL_GEN(dev_priv) >= 5) {
7505 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7506 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7507 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7508 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7509 /*
7510 * M2_N2 registers are set only if DRRS is supported
7511 * (to make sure the registers are not unnecessarily accessed).
7512 */
7513 if (m2_n2 && crtc_state->has_drrs &&
7514 transcoder_has_m2_n2(dev_priv, transcoder)) {
7515 I915_WRITE(PIPE_DATA_M2(transcoder),
7516 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7517 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7518 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7519 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7520 }
7521 } else {
7522 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7523 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7524 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7525 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7526 }
7527 }
7528
7529 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7530 {
7531 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7532
7533 if (m_n == M1_N1) {
7534 dp_m_n = &crtc_state->dp_m_n;
7535 dp_m2_n2 = &crtc_state->dp_m2_n2;
7536 } else if (m_n == M2_N2) {
7537
7538 /*
7539 * M2_N2 registers are not supported, so the m2_n2 divider
7540 * value needs to be programmed into M1_N1.
7541 */
7542 dp_m_n = &crtc_state->dp_m2_n2;
7543 } else {
7544 DRM_ERROR("Unsupported divider value\n");
7545 return;
7546 }
7547
7548 if (crtc_state->has_pch_encoder)
7549 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7550 else
7551 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7552 }
7553
7554 static void vlv_compute_dpll(struct intel_crtc *crtc,
7555 struct intel_crtc_state *pipe_config)
7556 {
7557 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7558 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7559 if (crtc->pipe != PIPE_A)
7560 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7561
7562 /* DPLL not used with DSI, but still need the rest set up */
7563 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7564 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7565 DPLL_EXT_BUFFER_ENABLE_VLV;
7566
7567 pipe_config->dpll_hw_state.dpll_md =
7568 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7569 }
7570
7571 static void chv_compute_dpll(struct intel_crtc *crtc,
7572 struct intel_crtc_state *pipe_config)
7573 {
7574 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7575 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7576 if (crtc->pipe != PIPE_A)
7577 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7578
7579 /* DPLL not used with DSI, but still need the rest set up */
7580 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7581 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7582
7583 pipe_config->dpll_hw_state.dpll_md =
7584 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7585 }
7586
7587 static void vlv_prepare_pll(struct intel_crtc *crtc,
7588 const struct intel_crtc_state *pipe_config)
7589 {
7590 struct drm_device *dev = crtc->base.dev;
7591 struct drm_i915_private *dev_priv = to_i915(dev);
7592 enum pipe pipe = crtc->pipe;
7593 u32 mdiv;
7594 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7595 u32 coreclk, reg_val;
7596
7597 /* Enable Refclk */
7598 I915_WRITE(DPLL(pipe),
7599 pipe_config->dpll_hw_state.dpll &
7600 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7601
7602 /* No need to actually set up the DPLL with DSI */
7603 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7604 return;
7605
7606 vlv_dpio_get(dev_priv);
7607
7608 bestn = pipe_config->dpll.n;
7609 bestm1 = pipe_config->dpll.m1;
7610 bestm2 = pipe_config->dpll.m2;
7611 bestp1 = pipe_config->dpll.p1;
7612 bestp2 = pipe_config->dpll.p2;
7613
7614 /* See eDP HDMI DPIO driver vbios notes doc */
7615
7616 /* PLL B needs special handling */
7617 if (pipe == PIPE_B)
7618 vlv_pllb_recal_opamp(dev_priv, pipe);
7619
7620 /* Set up Tx target for periodic Rcomp update */
7621 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7622
7623 /* Disable target IRef on PLL */
7624 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7625 reg_val &= 0x00ffffff;
7626 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7627
7628 /* Disable fast lock */
7629 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7630
7631 /* Set idtafcrecal before PLL is enabled */
7632 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7633 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7634 mdiv |= ((bestn << DPIO_N_SHIFT));
7635 mdiv |= (1 << DPIO_K_SHIFT);
7636
7637 /*
7638 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7639 * but we don't support that).
7640 * Note: don't use the DAC post divider as it seems unstable.
7641 */
7642 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7643 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7644
7645 mdiv |= DPIO_ENABLE_CALIBRATION;
7646 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7647
7648 /* Set HBR and RBR LPF coefficients */
7649 if (pipe_config->port_clock == 162000 ||
7650 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7651 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7652 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7653 0x009f0003);
7654 else
7655 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7656 0x00d0000f);
7657
7658 if (intel_crtc_has_dp_encoder(pipe_config)) {
7659 /* Use SSC source */
7660 if (pipe == PIPE_A)
7661 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7662 0x0df40000);
7663 else
7664 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7665 0x0df70000);
7666 } else { /* HDMI or VGA */
7667 /* Use bend source */
7668 if (pipe == PIPE_A)
7669 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7670 0x0df70000);
7671 else
7672 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7673 0x0df40000);
7674 }
7675
7676 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7677 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7678 if (intel_crtc_has_dp_encoder(pipe_config))
7679 coreclk |= 0x01000000;
7680 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7681
7682 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7683
7684 vlv_dpio_put(dev_priv);
7685 }
7686
7687 static void chv_prepare_pll(struct intel_crtc *crtc,
7688 const struct intel_crtc_state *pipe_config)
7689 {
7690 struct drm_device *dev = crtc->base.dev;
7691 struct drm_i915_private *dev_priv = to_i915(dev);
7692 enum pipe pipe = crtc->pipe;
7693 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7694 u32 loopfilter, tribuf_calcntr;
7695 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7696 u32 dpio_val;
7697 int vco;
7698
7699 /* Enable Refclk and SSC */
7700 I915_WRITE(DPLL(pipe),
7701 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7702
7703 /* No need to actually set up the DPLL with DSI */
7704 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7705 return;
7706
7707 bestn = pipe_config->dpll.n;
7708 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7709 bestm1 = pipe_config->dpll.m1;
7710 bestm2 = pipe_config->dpll.m2 >> 22;
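/* dpll.m2 carries a 22-bit fraction below the integer part on CHV */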
7711 bestp1 = pipe_config->dpll.p1;
7712 bestp2 = pipe_config->dpll.p2;
7713 vco = pipe_config->dpll.vco;
7714 dpio_val = 0;
7715 loopfilter = 0;
7716
7717 vlv_dpio_get(dev_priv);
7718
7719 /* p1 and p2 divider */
7720 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7721 5 << DPIO_CHV_S1_DIV_SHIFT |
7722 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7723 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7724 1 << DPIO_CHV_K_DIV_SHIFT);
7725
7726 /* Feedback post-divider - m2 */
7727 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7728
7729 /* Feedback refclk divider - n and m1 */
7730 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7731 DPIO_CHV_M1_DIV_BY_2 |
7732 1 << DPIO_CHV_N_DIV_SHIFT);
7733
7734 /* M2 fraction division */
7735 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7736
7737 /* M2 fraction division enable */
7738 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7739 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7740 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7741 if (bestm2_frac)
7742 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7743 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7744
7745 /* Program digital lock detect threshold */
7746 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7747 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7748 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7749 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7750 if (!bestm2_frac)
7751 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7752 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7753
7754 /* Loop filter */
7755 if (vco == 5400000) {
7756 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7757 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7758 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7759 tribuf_calcntr = 0x9;
7760 } else if (vco <= 6200000) {
7761 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7762 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7763 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7764 tribuf_calcntr = 0x9;
7765 } else if (vco <= 6480000) {
7766 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7767 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7768 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7769 tribuf_calcntr = 0x8;
7770 } else {
7771 /* Not supported. Apply the same limits as in the max case */
7772 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7773 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7774 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7775 tribuf_calcntr = 0;
7776 }
7777 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7778
7779 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7780 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7781 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7782 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7783
7784 /* AFC Recal */
7785 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7786 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7787 DPIO_AFC_RECAL);
7788
7789 vlv_dpio_put(dev_priv);
7790 }
7791
7792 /**
7793 * vlv_force_pll_on - forcibly enable just the PLL
7794 * @dev_priv: i915 private structure
7795 * @pipe: pipe PLL to enable
7796 * @dpll: PLL configuration
7797 *
7798 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7799 * in cases where we need the PLL enabled even when @pipe is not going to
7800 * be enabled.
7801 */
7802 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7803 const struct dpll *dpll)
7804 {
7805 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7806 struct intel_crtc_state *pipe_config;
7807
7808 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7809 if (!pipe_config)
7810 return -ENOMEM;
7811
7812 pipe_config->base.crtc = &crtc->base;
7813 pipe_config->pixel_multiplier = 1;
7814 pipe_config->dpll = *dpll;
7815
7816 if (IS_CHERRYVIEW(dev_priv)) {
7817 chv_compute_dpll(crtc, pipe_config);
7818 chv_prepare_pll(crtc, pipe_config);
7819 chv_enable_pll(crtc, pipe_config);
7820 } else {
7821 vlv_compute_dpll(crtc, pipe_config);
7822 vlv_prepare_pll(crtc, pipe_config);
7823 vlv_enable_pll(crtc, pipe_config);
7824 }
7825
7826 kfree(pipe_config);
7827
7828 return 0;
7829 }
7830
7831 /**
7832 * vlv_force_pll_off - forcibly disable just the PLL
7833 * @dev_priv: i915 private structure
7834 * @pipe: pipe PLL to disable
7835 *
7836 * Disable the PLL for @pipe. To be used in cases where we need
7837 * the PLL enabled even when @pipe is not going to be enabled.
7838 */
7839 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7840 {
7841 if (IS_CHERRYVIEW(dev_priv))
7842 chv_disable_pll(dev_priv, pipe);
7843 else
7844 vlv_disable_pll(dev_priv, pipe);
7845 }
7846
7847 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7848 struct intel_crtc_state *crtc_state,
7849 struct dpll *reduced_clock)
7850 {
7851 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7852 u32 dpll;
7853 struct dpll *clock = &crtc_state->dpll;
7854
7855 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7856
7857 dpll = DPLL_VGA_MODE_DIS;
7858
7859 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7860 dpll |= DPLLB_MODE_LVDS;
7861 else
7862 dpll |= DPLLB_MODE_DAC_SERIAL;
7863
7864 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7865 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7866 dpll |= (crtc_state->pixel_multiplier - 1)
7867 << SDVO_MULTIPLIER_SHIFT_HIRES;
7868 }
7869
7870 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7871 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7872 dpll |= DPLL_SDVO_HIGH_SPEED;
7873
7874 if (intel_crtc_has_dp_encoder(crtc_state))
7875 dpll |= DPLL_SDVO_HIGH_SPEED;
7876
7877 /* compute bitmask from p1 value */
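/* e.g. clock->p1 == 6 sets only bit 5 of the P1 divide field (one-hot) */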
7878 if (IS_PINEVIEW(dev_priv))
7879 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7880 else {
7881 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7882 if (IS_G4X(dev_priv) && reduced_clock)
7883 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7884 }
7885 switch (clock->p2) {
7886 case 5:
7887 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7888 break;
7889 case 7:
7890 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7891 break;
7892 case 10:
7893 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7894 break;
7895 case 14:
7896 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7897 break;
7898 }
7899 if (INTEL_GEN(dev_priv) >= 4)
7900 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7901
7902 if (crtc_state->sdvo_tv_clock)
7903 dpll |= PLL_REF_INPUT_TVCLKINBC;
7904 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7905 intel_panel_use_ssc(dev_priv))
7906 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7907 else
7908 dpll |= PLL_REF_INPUT_DREFCLK;
7909
7910 dpll |= DPLL_VCO_ENABLE;
7911 crtc_state->dpll_hw_state.dpll = dpll;
7912
7913 if (INTEL_GEN(dev_priv) >= 4) {
7914 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7915 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7916 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7917 }
7918 }
7919
7920 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7921 struct intel_crtc_state *crtc_state,
7922 struct dpll *reduced_clock)
7923 {
7924 struct drm_device *dev = crtc->base.dev;
7925 struct drm_i915_private *dev_priv = to_i915(dev);
7926 u32 dpll;
7927 struct dpll *clock = &crtc_state->dpll;
7928
7929 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7930
7931 dpll = DPLL_VGA_MODE_DIS;
7932
7933 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7934 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7935 } else {
7936 if (clock->p1 == 2)
7937 dpll |= PLL_P1_DIVIDE_BY_TWO;
7938 else
7939 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7940 if (clock->p2 == 4)
7941 dpll |= PLL_P2_DIVIDE_BY_4;
7942 }
7943
7944 /*
7945 * Bspec:
7946 * "[Almador Errata}: For the correct operation of the muxed DVO pins
7947 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7948 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7949 * Enable) must be set to “1” in both the DPLL A Control Register
7950 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7951 *
7952 * For simplicity We simply keep both bits always enabled in
7953 * both DPLLS. The spec says we should disable the DVO 2X clock
7954 * when not needed, but this seems to work fine in practice.
7955 */
7956 if (IS_I830(dev_priv) ||
7957 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7958 dpll |= DPLL_DVO_2X_MODE;
7959
7960 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7961 intel_panel_use_ssc(dev_priv))
7962 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7963 else
7964 dpll |= PLL_REF_INPUT_DREFCLK;
7965
7966 dpll |= DPLL_VCO_ENABLE;
7967 crtc_state->dpll_hw_state.dpll = dpll;
7968 }
7969
7970 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7971 {
7972 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7973 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7974 enum pipe pipe = crtc->pipe;
7975 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7976 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7977 u32 crtc_vtotal, crtc_vblank_end;
7978 int vsyncshift = 0;
7979
7980 /* We need to be careful not to change the adjusted mode, for otherwise
7981 * the hw state checker will get angry at the mismatch. */
7982 crtc_vtotal = adjusted_mode->crtc_vtotal;
7983 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7984
7985 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7986 /* the chip adds 2 halflines automatically */
7987 crtc_vtotal -= 1;
7988 crtc_vblank_end -= 1;
7989
7990 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7991 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7992 else
7993 vsyncshift = adjusted_mode->crtc_hsync_start -
7994 adjusted_mode->crtc_htotal / 2;
7995 if (vsyncshift < 0)
7996 vsyncshift += adjusted_mode->crtc_htotal;
7997 }
7998
7999 if (INTEL_GEN(dev_priv) > 3)
8000 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
8001
8002 I915_WRITE(HTOTAL(cpu_transcoder),
8003 (adjusted_mode->crtc_hdisplay - 1) |
8004 ((adjusted_mode->crtc_htotal - 1) << 16));
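/* e.g. hdisplay 1920, htotal 2200 -> (2199 << 16) | 1919 = 0x0897077f */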
8005 I915_WRITE(HBLANK(cpu_transcoder),
8006 (adjusted_mode->crtc_hblank_start - 1) |
8007 ((adjusted_mode->crtc_hblank_end - 1) << 16));
8008 I915_WRITE(HSYNC(cpu_transcoder),
8009 (adjusted_mode->crtc_hsync_start - 1) |
8010 ((adjusted_mode->crtc_hsync_end - 1) << 16));
8011
8012 I915_WRITE(VTOTAL(cpu_transcoder),
8013 (adjusted_mode->crtc_vdisplay - 1) |
8014 ((crtc_vtotal - 1) << 16));
8015 I915_WRITE(VBLANK(cpu_transcoder),
8016 (adjusted_mode->crtc_vblank_start - 1) |
8017 ((crtc_vblank_end - 1) << 16));
8018 I915_WRITE(VSYNC(cpu_transcoder),
8019 (adjusted_mode->crtc_vsync_start - 1) |
8020 ((adjusted_mode->crtc_vsync_end - 1) << 16));
8021
8022 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8023 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8024 * documented in the DDI_FUNC_CTL register description, EDP Input Select
8025 * bits. */
8026 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8027 (pipe == PIPE_B || pipe == PIPE_C))
8028 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
8029
8030 }
8031
8032 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8033 {
8034 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8035 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8036 enum pipe pipe = crtc->pipe;
8037
8038 /* pipesrc controls the size that is scaled from, which should
8039 * always be the user's requested size.
8040 */
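/* e.g. a 1920x1080 source -> PIPESRC = (1919 << 16) | 1079 = 0x077f0437 */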
8041 I915_WRITE(PIPESRC(pipe),
8042 ((crtc_state->pipe_src_w - 1) << 16) |
8043 (crtc_state->pipe_src_h - 1));
8044 }
8045
8046 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8047 struct intel_crtc_state *pipe_config)
8048 {
8049 struct drm_device *dev = crtc->base.dev;
8050 struct drm_i915_private *dev_priv = to_i915(dev);
8051 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8052 u32 tmp;
8053
8054 tmp = I915_READ(HTOTAL(cpu_transcoder));
8055 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8056 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8057
8058 if (!transcoder_is_dsi(cpu_transcoder)) {
8059 tmp = I915_READ(HBLANK(cpu_transcoder));
8060 pipe_config->base.adjusted_mode.crtc_hblank_start =
8061 (tmp & 0xffff) + 1;
8062 pipe_config->base.adjusted_mode.crtc_hblank_end =
8063 ((tmp >> 16) & 0xffff) + 1;
8064 }
8065 tmp = I915_READ(HSYNC(cpu_transcoder));
8066 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8067 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8068
8069 tmp = I915_READ(VTOTAL(cpu_transcoder));
8070 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8071 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8072
8073 if (!transcoder_is_dsi(cpu_transcoder)) {
8074 tmp = I915_READ(VBLANK(cpu_transcoder));
8075 pipe_config->base.adjusted_mode.crtc_vblank_start =
8076 (tmp & 0xffff) + 1;
8077 pipe_config->base.adjusted_mode.crtc_vblank_end =
8078 ((tmp >> 16) & 0xffff) + 1;
8079 }
8080 tmp = I915_READ(VSYNC(cpu_transcoder));
8081 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8082 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8083
8084 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
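/* undo the halfline adjustment made when programming the timings */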
8085 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8086 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8087 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8088 }
8089 }
8090
8091 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8092 struct intel_crtc_state *pipe_config)
8093 {
8094 struct drm_device *dev = crtc->base.dev;
8095 struct drm_i915_private *dev_priv = to_i915(dev);
8096 u32 tmp;
8097
8098 tmp = I915_READ(PIPESRC(crtc->pipe));
8099 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8100 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8101
8102 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8103 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8104 }
8105
8106 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8107 struct intel_crtc_state *pipe_config)
8108 {
8109 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8110 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8111 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8112 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8113
8114 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8115 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8116 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8117 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8118
8119 mode->flags = pipe_config->base.adjusted_mode.flags;
8120 mode->type = DRM_MODE_TYPE_DRIVER;
8121
8122 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8123
8124 mode->hsync = drm_mode_hsync(mode);
8125 mode->vrefresh = drm_mode_vrefresh(mode);
8126 drm_mode_set_name(mode);
8127 }
8128
8129 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8130 {
8131 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8132 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8133 u32 pipeconf;
8134
8135 pipeconf = 0;
8136
8137 /* we keep both pipes enabled on 830 */
8138 if (IS_I830(dev_priv))
8139 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8140
8141 if (crtc_state->double_wide)
8142 pipeconf |= PIPECONF_DOUBLE_WIDE;
8143
8144 /* only g4x and later have fancy bpc/dither controls */
8145 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8146 IS_CHERRYVIEW(dev_priv)) {
8147 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8148 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8149 pipeconf |= PIPECONF_DITHER_EN |
8150 PIPECONF_DITHER_TYPE_SP;
8151
8152 switch (crtc_state->pipe_bpp) {
8153 case 18:
8154 pipeconf |= PIPECONF_6BPC;
8155 break;
8156 case 24:
8157 pipeconf |= PIPECONF_8BPC;
8158 break;
8159 case 30:
8160 pipeconf |= PIPECONF_10BPC;
8161 break;
8162 default:
8163 /* Case prevented by intel_choose_pipe_bpp_dither. */
8164 BUG();
8165 }
8166 }
8167
8168 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8169 if (INTEL_GEN(dev_priv) < 4 ||
8170 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8171 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8172 else
8173 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8174 } else {
8175 pipeconf |= PIPECONF_PROGRESSIVE;
8176 }
8177
8178 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8179 crtc_state->limited_color_range)
8180 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8181
8182 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8183
8184 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8185 POSTING_READ(PIPECONF(crtc->pipe));
8186 }
8187
8188 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8189 struct intel_crtc_state *crtc_state)
8190 {
8191 struct drm_device *dev = crtc->base.dev;
8192 struct drm_i915_private *dev_priv = to_i915(dev);
8193 const struct intel_limit *limit;
8194 int refclk = 48000;
8195
8196 memset(&crtc_state->dpll_hw_state, 0,
8197 sizeof(crtc_state->dpll_hw_state));
8198
8199 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8200 if (intel_panel_use_ssc(dev_priv)) {
8201 refclk = dev_priv->vbt.lvds_ssc_freq;
8202 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8203 }
8204
8205 limit = &intel_limits_i8xx_lvds;
8206 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8207 limit = &intel_limits_i8xx_dvo;
8208 } else {
8209 limit = &intel_limits_i8xx_dac;
8210 }
8211
8212 if (!crtc_state->clock_set &&
8213 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8214 refclk, NULL, &crtc_state->dpll)) {
8215 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8216 return -EINVAL;
8217 }
8218
8219 i8xx_compute_dpll(crtc, crtc_state, NULL);
8220
8221 return 0;
8222 }
8223
8224 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8225 struct intel_crtc_state *crtc_state)
8226 {
8227 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8228 const struct intel_limit *limit;
8229 int refclk = 96000;
8230
8231 memset(&crtc_state->dpll_hw_state, 0,
8232 sizeof(crtc_state->dpll_hw_state));
8233
8234 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8235 if (intel_panel_use_ssc(dev_priv)) {
8236 refclk = dev_priv->vbt.lvds_ssc_freq;
8237 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8238 }
8239
8240 if (intel_is_dual_link_lvds(dev_priv))
8241 limit = &intel_limits_g4x_dual_channel_lvds;
8242 else
8243 limit = &intel_limits_g4x_single_channel_lvds;
8244 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8245 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8246 limit = &intel_limits_g4x_hdmi;
8247 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8248 limit = &intel_limits_g4x_sdvo;
8249 } else {
8250 /* Fall back to the i9xx SDVO limits for any other output type */
8251 limit = &intel_limits_i9xx_sdvo;
8252 }
8253
8254 if (!crtc_state->clock_set &&
8255 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8256 refclk, NULL, &crtc_state->dpll)) {
8257 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8258 return -EINVAL;
8259 }
8260
8261 i9xx_compute_dpll(crtc, crtc_state, NULL);
8262
8263 return 0;
8264 }
8265
8266 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8267 struct intel_crtc_state *crtc_state)
8268 {
8269 struct drm_device *dev = crtc->base.dev;
8270 struct drm_i915_private *dev_priv = to_i915(dev);
8271 const struct intel_limit *limit;
8272 int refclk = 96000;
8273
8274 memset(&crtc_state->dpll_hw_state, 0,
8275 sizeof(crtc_state->dpll_hw_state));
8276
8277 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8278 if (intel_panel_use_ssc(dev_priv)) {
8279 refclk = dev_priv->vbt.lvds_ssc_freq;
8280 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8281 }
8282
8283 limit = &intel_limits_pineview_lvds;
8284 } else {
8285 limit = &intel_limits_pineview_sdvo;
8286 }
8287
8288 if (!crtc_state->clock_set &&
8289 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8290 refclk, NULL, &crtc_state->dpll)) {
8291 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8292 return -EINVAL;
8293 }
8294
8295 i9xx_compute_dpll(crtc, crtc_state, NULL);
8296
8297 return 0;
8298 }
8299
8300 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8301 struct intel_crtc_state *crtc_state)
8302 {
8303 struct drm_device *dev = crtc->base.dev;
8304 struct drm_i915_private *dev_priv = to_i915(dev);
8305 const struct intel_limit *limit;
8306 int refclk = 96000;
8307
8308 memset(&crtc_state->dpll_hw_state, 0,
8309 sizeof(crtc_state->dpll_hw_state));
8310
8311 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8312 if (intel_panel_use_ssc(dev_priv)) {
8313 refclk = dev_priv->vbt.lvds_ssc_freq;
8314 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8315 }
8316
8317 limit = &intel_limits_i9xx_lvds;
8318 } else {
8319 limit = &intel_limits_i9xx_sdvo;
8320 }
8321
8322 if (!crtc_state->clock_set &&
8323 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8324 refclk, NULL, &crtc_state->dpll)) {
8325 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8326 return -EINVAL;
8327 }
8328
8329 i9xx_compute_dpll(crtc, crtc_state, NULL);
8330
8331 return 0;
8332 }
8333
8334 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8335 struct intel_crtc_state *crtc_state)
8336 {
8337 int refclk = 100000;
8338 const struct intel_limit *limit = &intel_limits_chv;
8339
8340 memset(&crtc_state->dpll_hw_state, 0,
8341 sizeof(crtc_state->dpll_hw_state));
8342
8343 if (!crtc_state->clock_set &&
8344 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8345 refclk, NULL, &crtc_state->dpll)) {
8346 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8347 return -EINVAL;
8348 }
8349
8350 chv_compute_dpll(crtc, crtc_state);
8351
8352 return 0;
8353 }
8354
8355 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8356 struct intel_crtc_state *crtc_state)
8357 {
8358 int refclk = 100000;
8359 const struct intel_limit *limit = &intel_limits_vlv;
8360
8361 memset(&crtc_state->dpll_hw_state, 0,
8362 sizeof(crtc_state->dpll_hw_state));
8363
8364 if (!crtc_state->clock_set &&
8365 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8366 refclk, NULL, &crtc_state->dpll)) {
8367 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8368 return -EINVAL;
8369 }
8370
8371 vlv_compute_dpll(crtc, crtc_state);
8372
8373 return 0;
8374 }
8375
8376 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8377 {
8378 if (IS_I830(dev_priv))
8379 return false;
8380
8381 return INTEL_GEN(dev_priv) >= 4 ||
8382 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8383 }
8384
8385 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8386 struct intel_crtc_state *pipe_config)
8387 {
8388 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8389 u32 tmp;
8390
8391 if (!i9xx_has_pfit(dev_priv))
8392 return;
8393
8394 tmp = I915_READ(PFIT_CONTROL);
8395 if (!(tmp & PFIT_ENABLE))
8396 return;
8397
8398 /* Check whether the pfit is attached to our pipe. */
8399 if (INTEL_GEN(dev_priv) < 4) {
8400 if (crtc->pipe != PIPE_B)
8401 return;
8402 } else {
8403 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8404 return;
8405 }
8406
8407 pipe_config->gmch_pfit.control = tmp;
8408 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8409 }
8410
8411 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8412 struct intel_crtc_state *pipe_config)
8413 {
8414 struct drm_device *dev = crtc->base.dev;
8415 struct drm_i915_private *dev_priv = to_i915(dev);
8416 int pipe = pipe_config->cpu_transcoder;
8417 struct dpll clock;
8418 u32 mdiv;
8419 int refclk = 100000;
8420
8421 /* In case of DSI, DPLL will not be used */
8422 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8423 return;
8424
8425 vlv_dpio_get(dev_priv);
8426 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8427 vlv_dpio_put(dev_priv);
8428
8429 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8430 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8431 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8432 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8433 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8434
8435 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8436 }
8437
8438 static void
8439 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8440 struct intel_initial_plane_config *plane_config)
8441 {
8442 struct drm_device *dev = crtc->base.dev;
8443 struct drm_i915_private *dev_priv = to_i915(dev);
8444 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8445 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8446 enum pipe pipe;
8447 u32 val, base, offset;
8448 int fourcc, pixel_format;
8449 unsigned int aligned_height;
8450 struct drm_framebuffer *fb;
8451 struct intel_framebuffer *intel_fb;
8452
8453 if (!plane->get_hw_state(plane, &pipe))
8454 return;
8455
8456 WARN_ON(pipe != crtc->pipe);
8457
8458 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8459 if (!intel_fb) {
8460 DRM_DEBUG_KMS("failed to alloc fb\n");
8461 return;
8462 }
8463
8464 fb = &intel_fb->base;
8465
8466 fb->dev = dev;
8467
8468 val = I915_READ(DSPCNTR(i9xx_plane));
8469
8470 if (INTEL_GEN(dev_priv) >= 4) {
8471 if (val & DISPPLANE_TILED) {
8472 plane_config->tiling = I915_TILING_X;
8473 fb->modifier = I915_FORMAT_MOD_X_TILED;
8474 }
8475
8476 if (val & DISPPLANE_ROTATE_180)
8477 plane_config->rotation = DRM_MODE_ROTATE_180;
8478 }
8479
8480 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8481 val & DISPPLANE_MIRROR)
8482 plane_config->rotation |= DRM_MODE_REFLECT_X;
8483
8484 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8485 fourcc = i9xx_format_to_fourcc(pixel_format);
8486 fb->format = drm_format_info(fourcc);
8487
8488 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8489 offset = I915_READ(DSPOFFSET(i9xx_plane));
8490 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8491 } else if (INTEL_GEN(dev_priv) >= 4) {
8492 if (plane_config->tiling)
8493 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8494 else
8495 offset = I915_READ(DSPLINOFF(i9xx_plane));
8496 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8497 } else {
8498 base = I915_READ(DSPADDR(i9xx_plane));
8499 }
8500 plane_config->base = base;
8501
8502 val = I915_READ(PIPESRC(pipe));
8503 fb->width = ((val >> 16) & 0xfff) + 1;
8504 fb->height = ((val >> 0) & 0xfff) + 1;
8505
8506 val = I915_READ(DSPSTRIDE(i9xx_plane));
8507 fb->pitches[0] = val & 0xffffffc0;
8508
8509 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8510
8511 plane_config->size = fb->pitches[0] * aligned_height;
8512
8513 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8514 crtc->base.name, plane->base.name, fb->width, fb->height,
8515 fb->format->cpp[0] * 8, base, fb->pitches[0],
8516 plane_config->size);
8517
8518 plane_config->fb = intel_fb;
8519 }
8520
8521 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8522 struct intel_crtc_state *pipe_config)
8523 {
8524 struct drm_device *dev = crtc->base.dev;
8525 struct drm_i915_private *dev_priv = to_i915(dev);
8526 int pipe = pipe_config->cpu_transcoder;
8527 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8528 struct dpll clock;
8529 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8530 int refclk = 100000;
8531
8532 /* In case of DSI, DPLL will not be used */
8533 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8534 return;
8535
8536 vlv_dpio_get(dev_priv);
8537 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8538 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8539 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8540 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8541 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8542 vlv_dpio_put(dev_priv);
8543
8544 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8545 clock.m2 = (pll_dw0 & 0xff) << 22;
8546 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8547 clock.m2 |= pll_dw2 & 0x3fffff;
8548 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8549 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8550 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8551
8552 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8553 }
8554
8555 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8556 struct intel_crtc_state *pipe_config)
8557 {
8558 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8559 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8560
8561 pipe_config->lspcon_downsampling = false;
8562
8563 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8564 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8565
8566 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8567 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8568 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8569
8570 if (ycbcr420_enabled) {
8571 /* We support 4:2:0 in full blend mode only */
8572 if (!blend)
8573 output = INTEL_OUTPUT_FORMAT_INVALID;
8574 else if (!(IS_GEMINILAKE(dev_priv) ||
8575 INTEL_GEN(dev_priv) >= 10))
8576 output = INTEL_OUTPUT_FORMAT_INVALID;
8577 else
8578 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8579 } else {
8580 /*
8581 * Currently there is no interface defined to
8582 * check the user preference between RGB/YCBCR444
8583 * or YCBCR420, so the only possible case for
8584 * YCBCR444 usage is driving YCBCR420 output
8585 * with LSPCON, where the pipe is configured for
8586 * YCBCR444 output and the LSPCON takes care of
8587 * downsampling it.
8588 */
8589 pipe_config->lspcon_downsampling = true;
8590 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8591 }
8592 }
8593 }
8594
8595 pipe_config->output_format = output;
8596 }
8597
8598 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8599 {
8600 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8601 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8602 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8603 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8604 u32 tmp;
8605
8606 tmp = I915_READ(DSPCNTR(i9xx_plane));
8607
8608 if (tmp & DISPPLANE_GAMMA_ENABLE)
8609 crtc_state->gamma_enable = true;
8610
8611 if (!HAS_GMCH(dev_priv) &&
8612 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8613 crtc_state->csc_enable = true;
8614 }
8615
8616 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8617 struct intel_crtc_state *pipe_config)
8618 {
8619 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8620 enum intel_display_power_domain power_domain;
8621 intel_wakeref_t wakeref;
8622 u32 tmp;
8623 bool ret;
8624
8625 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8626 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8627 if (!wakeref)
8628 return false;
8629
8630 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8631 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8632 pipe_config->shared_dpll = NULL;
8633
8634 ret = false;
8635
8636 tmp = I915_READ(PIPECONF(crtc->pipe));
8637 if (!(tmp & PIPECONF_ENABLE))
8638 goto out;
8639
8640 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8641 IS_CHERRYVIEW(dev_priv)) {
8642 switch (tmp & PIPECONF_BPC_MASK) {
8643 case PIPECONF_6BPC:
8644 pipe_config->pipe_bpp = 18;
8645 break;
8646 case PIPECONF_8BPC:
8647 pipe_config->pipe_bpp = 24;
8648 break;
8649 case PIPECONF_10BPC:
8650 pipe_config->pipe_bpp = 30;
8651 break;
8652 default:
8653 break;
8654 }
8655 }
8656
8657 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8658 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8659 pipe_config->limited_color_range = true;
8660
8661 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8662 PIPECONF_GAMMA_MODE_SHIFT;
8663
8664 if (IS_CHERRYVIEW(dev_priv))
8665 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8666
8667 i9xx_get_pipe_color_config(pipe_config);
8668 intel_color_get_config(pipe_config);
8669
8670 if (INTEL_GEN(dev_priv) < 4)
8671 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8672
8673 intel_get_pipe_timings(crtc, pipe_config);
8674 intel_get_pipe_src_size(crtc, pipe_config);
8675
8676 i9xx_get_pfit_config(crtc, pipe_config);
8677
8678 if (INTEL_GEN(dev_priv) >= 4) {
8679 /* No way to read it out on pipes B and C */
8680 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8681 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8682 else
8683 tmp = I915_READ(DPLL_MD(crtc->pipe));
8684 pipe_config->pixel_multiplier =
8685 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8686 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8687 pipe_config->dpll_hw_state.dpll_md = tmp;
8688 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8689 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8690 tmp = I915_READ(DPLL(crtc->pipe));
8691 pipe_config->pixel_multiplier =
8692 ((tmp & SDVO_MULTIPLIER_MASK)
8693 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8694 } else {
8695 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8696 * port and will be fixed up in the encoder->get_config
8697 * function. */
8698 pipe_config->pixel_multiplier = 1;
8699 }
8700 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8701 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8702 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8703 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8704 } else {
8705 /* Mask out read-only status bits. */
8706 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8707 DPLL_PORTC_READY_MASK |
8708 DPLL_PORTB_READY_MASK);
8709 }
8710
8711 if (IS_CHERRYVIEW(dev_priv))
8712 chv_crtc_clock_get(crtc, pipe_config);
8713 else if (IS_VALLEYVIEW(dev_priv))
8714 vlv_crtc_clock_get(crtc, pipe_config);
8715 else
8716 i9xx_crtc_clock_get(crtc, pipe_config);
8717
8718 /*
8719 * Normally the dotclock is filled in by the encoder .get_config()
8720 * but in case the pipe is enabled w/o any ports we need a sane
8721 * default.
8722 */
8723 pipe_config->base.adjusted_mode.crtc_clock =
8724 pipe_config->port_clock / pipe_config->pixel_multiplier;
8725
8726 ret = true;
8727
8728 out:
8729 intel_display_power_put(dev_priv, power_domain, wakeref);
8730
8731 return ret;
8732 }
8733
8734 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8735 {
8736 struct intel_encoder *encoder;
8737 int i;
8738 u32 val, final;
8739 bool has_lvds = false;
8740 bool has_cpu_edp = false;
8741 bool has_panel = false;
8742 bool has_ck505 = false;
8743 bool can_ssc = false;
8744 bool using_ssc_source = false;
8745
8746 /* We need to take the global config into account */
8747 for_each_intel_encoder(&dev_priv->drm, encoder) {
8748 switch (encoder->type) {
8749 case INTEL_OUTPUT_LVDS:
8750 has_panel = true;
8751 has_lvds = true;
8752 break;
8753 case INTEL_OUTPUT_EDP:
8754 has_panel = true;
8755 if (encoder->port == PORT_A)
8756 has_cpu_edp = true;
8757 break;
8758 default:
8759 break;
8760 }
8761 }
8762
8763 if (HAS_PCH_IBX(dev_priv)) {
8764 has_ck505 = dev_priv->vbt.display_clock_mode;
8765 can_ssc = has_ck505;
8766 } else {
8767 has_ck505 = false;
8768 can_ssc = true;
8769 }
8770
8771 /* Check if any DPLLs are using the SSC source */
8772 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8773 u32 temp = I915_READ(PCH_DPLL(i));
8774
8775 if (!(temp & DPLL_VCO_ENABLE))
8776 continue;
8777
8778 if ((temp & PLL_REF_INPUT_MASK) ==
8779 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8780 using_ssc_source = true;
8781 break;
8782 }
8783 }
8784
8785 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8786 has_panel, has_lvds, has_ck505, using_ssc_source);
8787
8788 /* Ironlake: try to set up the display ref clock before enabling
8789 * the DPLLs. This is only under the driver's control after
8790 * PCH B stepping; previous chipset steppings should
8791 * ignore this setting.
8792 */
8793 val = I915_READ(PCH_DREF_CONTROL);
8794
8795 /* As we must carefully and slowly disable/enable each source in turn,
8796 * compute the final state we want first and check if we need to
8797 * make any changes at all.
8798 */
8799 final = val;
8800 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8801 if (has_ck505)
8802 final |= DREF_NONSPREAD_CK505_ENABLE;
8803 else
8804 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8805
8806 final &= ~DREF_SSC_SOURCE_MASK;
8807 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8808 final &= ~DREF_SSC1_ENABLE;
8809
8810 if (has_panel) {
8811 final |= DREF_SSC_SOURCE_ENABLE;
8812
8813 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8814 final |= DREF_SSC1_ENABLE;
8815
8816 if (has_cpu_edp) {
8817 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8818 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8819 else
8820 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8821 } else
8822 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8823 } else if (using_ssc_source) {
8824 final |= DREF_SSC_SOURCE_ENABLE;
8825 final |= DREF_SSC1_ENABLE;
8826 }
8827
8828 if (final == val)
8829 return;
8830
8831 /* Always enable nonspread source */
8832 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8833
8834 if (has_ck505)
8835 val |= DREF_NONSPREAD_CK505_ENABLE;
8836 else
8837 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8838
8839 if (has_panel) {
8840 val &= ~DREF_SSC_SOURCE_MASK;
8841 val |= DREF_SSC_SOURCE_ENABLE;
8842
8843 /* SSC must be turned on before enabling the CPU output */
8844 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8845 DRM_DEBUG_KMS("Using SSC on panel\n");
8846 val |= DREF_SSC1_ENABLE;
8847 } else
8848 val &= ~DREF_SSC1_ENABLE;
8849
8850 /* Get SSC going before enabling the outputs */
8851 I915_WRITE(PCH_DREF_CONTROL, val);
8852 POSTING_READ(PCH_DREF_CONTROL);
8853 udelay(200);
8854
8855 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8856
8857 /* Enable CPU source on CPU attached eDP */
8858 if (has_cpu_edp) {
8859 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8860 DRM_DEBUG_KMS("Using SSC on eDP\n");
8861 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8862 } else
8863 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8864 } else
8865 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8866
8867 I915_WRITE(PCH_DREF_CONTROL, val);
8868 POSTING_READ(PCH_DREF_CONTROL);
8869 udelay(200);
8870 } else {
8871 DRM_DEBUG_KMS("Disabling CPU source output\n");
8872
8873 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8874
8875 /* Turn off CPU output */
8876 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8877
8878 I915_WRITE(PCH_DREF_CONTROL, val);
8879 POSTING_READ(PCH_DREF_CONTROL);
8880 udelay(200);
8881
8882 if (!using_ssc_source) {
8883 DRM_DEBUG_KMS("Disabling SSC source\n");
8884
8885 /* Turn off the SSC source */
8886 val &= ~DREF_SSC_SOURCE_MASK;
8887 val |= DREF_SSC_SOURCE_DISABLE;
8888
8889 /* Turn off SSC1 */
8890 val &= ~DREF_SSC1_ENABLE;
8891
8892 I915_WRITE(PCH_DREF_CONTROL, val);
8893 POSTING_READ(PCH_DREF_CONTROL);
8894 udelay(200);
8895 }
8896 }
8897
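/* every branch above must have walked val to the precomputed final state */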
8898 BUG_ON(val != final);
8899 }
8900
8901 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8902 {
8903 u32 tmp;
8904
8905 tmp = I915_READ(SOUTH_CHICKEN2);
8906 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8907 I915_WRITE(SOUTH_CHICKEN2, tmp);
8908
8909 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8910 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8911 DRM_ERROR("FDI mPHY reset assert timeout\n");
8912
8913 tmp = I915_READ(SOUTH_CHICKEN2);
8914 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8915 I915_WRITE(SOUTH_CHICKEN2, tmp);
8916
8917 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8918 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8919 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8920 }
8921
8922 /* WaMPhyProgramming:hsw */
8923 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8924 {
8925 u32 tmp;
8926
8927 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8928 tmp &= ~(0xFF << 24);
8929 tmp |= (0x12 << 24);
8930 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8931
8932 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8933 tmp |= (1 << 11);
8934 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8935
8936 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8937 tmp |= (1 << 11);
8938 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8939
8940 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8941 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8942 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8943
8944 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8945 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8946 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8947
8948 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8949 tmp &= ~(7 << 13);
8950 tmp |= (5 << 13);
8951 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8952
8953 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8954 tmp &= ~(7 << 13);
8955 tmp |= (5 << 13);
8956 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8957
8958 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8959 tmp &= ~0xFF;
8960 tmp |= 0x1C;
8961 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8962
8963 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8964 tmp &= ~0xFF;
8965 tmp |= 0x1C;
8966 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8967
8968 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8969 tmp &= ~(0xFF << 16);
8970 tmp |= (0x1C << 16);
8971 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8972
8973 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8974 tmp &= ~(0xFF << 16);
8975 tmp |= (0x1C << 16);
8976 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8977
8978 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8979 tmp |= (1 << 27);
8980 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8981
8982 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8983 tmp |= (1 << 27);
8984 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8985
8986 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8987 tmp &= ~(0xF << 28);
8988 tmp |= (4 << 28);
8989 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8990
8991 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8992 tmp &= ~(0xF << 28);
8993 tmp |= (4 << 28);
8994 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8995 }
8996
8997 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8998 * Programming" based on the parameters passed:
8999 * - Sequence to enable CLKOUT_DP
9000 * - Sequence to enable CLKOUT_DP without spread
9001 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
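 *
 * Presumably these map onto the parameters as: plain CLKOUT_DP is
 * with_spread = true, with_fdi = false; "without spread" is
 * with_spread = false, with_fdi = false; the FDI variant is
 * with_spread = true, with_fdi = true (FDI requires downspread,
 * see the WARN below).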
9002 */
9003 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9004 bool with_spread, bool with_fdi)
9005 {
9006 u32 reg, tmp;
9007
9008 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9009 with_spread = true;
9010 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9011 with_fdi, "LP PCH doesn't have FDI\n"))
9012 with_fdi = false;
9013
9014 mutex_lock(&dev_priv->sb_lock);
9015
9016 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9017 tmp &= ~SBI_SSCCTL_DISABLE;
9018 tmp |= SBI_SSCCTL_PATHALT;
9019 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9020
9021 udelay(24);
9022
9023 if (with_spread) {
9024 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9025 tmp &= ~SBI_SSCCTL_PATHALT;
9026 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9027
9028 if (with_fdi) {
9029 lpt_reset_fdi_mphy(dev_priv);
9030 lpt_program_fdi_mphy(dev_priv);
9031 }
9032 }
9033
9034 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9035 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9036 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9037 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9038
9039 mutex_unlock(&dev_priv->sb_lock);
9040 }
9041
9042 /* Sequence to disable CLKOUT_DP */
9043 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9044 {
9045 u32 reg, tmp;
9046
9047 mutex_lock(&dev_priv->sb_lock);
9048
9049 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9050 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9051 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9052 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9053
9054 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9055 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9056 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9057 tmp |= SBI_SSCCTL_PATHALT;
9058 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9059 udelay(32);
9060 }
9061 tmp |= SBI_SSCCTL_DISABLE;
9062 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9063 }
9064
9065 mutex_unlock(&dev_priv->sb_lock);
9066 }
9067
9068 #define BEND_IDX(steps) ((50 + (steps)) / 5)
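/* e.g. BEND_IDX(-50) = 0, BEND_IDX(0) = 10, BEND_IDX(50) = 20 */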
9069
9070 static const u16 sscdivintphase[] = {
9071 [BEND_IDX( 50)] = 0x3B23,
9072 [BEND_IDX( 45)] = 0x3B23,
9073 [BEND_IDX( 40)] = 0x3C23,
9074 [BEND_IDX( 35)] = 0x3C23,
9075 [BEND_IDX( 30)] = 0x3D23,
9076 [BEND_IDX( 25)] = 0x3D23,
9077 [BEND_IDX( 20)] = 0x3E23,
9078 [BEND_IDX( 15)] = 0x3E23,
9079 [BEND_IDX( 10)] = 0x3F23,
9080 [BEND_IDX( 5)] = 0x3F23,
9081 [BEND_IDX( 0)] = 0x0025,
9082 [BEND_IDX( -5)] = 0x0025,
9083 [BEND_IDX(-10)] = 0x0125,
9084 [BEND_IDX(-15)] = 0x0125,
9085 [BEND_IDX(-20)] = 0x0225,
9086 [BEND_IDX(-25)] = 0x0225,
9087 [BEND_IDX(-30)] = 0x0325,
9088 [BEND_IDX(-35)] = 0x0325,
9089 [BEND_IDX(-40)] = 0x0425,
9090 [BEND_IDX(-45)] = 0x0425,
9091 [BEND_IDX(-50)] = 0x0525,
9092 };
9093
9094 /*
9095 * Bend CLKOUT_DP
9096 * steps -50 to 50 inclusive, in steps of 5
9097 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9098 * change in clock period = -(steps / 10) * 5.787 ps
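 *
 * Worked example (illustrative): steps = -25 gives idx = BEND_IDX(-25) = 5,
 * so the low 16 bits of SSCDIVINTPHASE become 0x0225, and since -25 is
 * not a multiple of 10 the dither pattern 0xAAAAAAAB is used.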
9099 */
9100 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9101 {
9102 u32 tmp;
9103 int idx = BEND_IDX(steps);
9104
9105 if (WARN_ON(steps % 5 != 0))
9106 return;
9107
9108 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9109 return;
9110
9111 mutex_lock(&dev_priv->sb_lock);
9112
9113 if (steps % 10 != 0)
9114 tmp = 0xAAAAAAAB;
9115 else
9116 tmp = 0x00000000;
9117 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9118
9119 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9120 tmp &= 0xffff0000;
9121 tmp |= sscdivintphase[idx];
9122 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9123
9124 mutex_unlock(&dev_priv->sb_lock);
9125 }
9126
9127 #undef BEND_IDX
9128
9129 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9130 {
9131 struct intel_encoder *encoder;
9132 bool has_vga = false;
9133
9134 for_each_intel_encoder(&dev_priv->drm, encoder) {
9135 switch (encoder->type) {
9136 case INTEL_OUTPUT_ANALOG:
9137 has_vga = true;
9138 break;
9139 default:
9140 break;
9141 }
9142 }
9143
9144 if (has_vga) {
9145 lpt_bend_clkout_dp(dev_priv, 0);
9146 lpt_enable_clkout_dp(dev_priv, true, true);
9147 } else {
9148 lpt_disable_clkout_dp(dev_priv);
9149 }
9150 }
9151
9152 /*
9153 * Initialize reference clocks when the driver loads
9154 */
9155 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9156 {
9157 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9158 ironlake_init_pch_refclk(dev_priv);
9159 else if (HAS_PCH_LPT(dev_priv))
9160 lpt_init_pch_refclk(dev_priv);
9161 }
9162
9163 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9164 {
9165 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9166 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9167 enum pipe pipe = crtc->pipe;
9168 u32 val;
9169
9170 val = 0;
9171
9172 switch (crtc_state->pipe_bpp) {
9173 case 18:
9174 val |= PIPECONF_6BPC;
9175 break;
9176 case 24:
9177 val |= PIPECONF_8BPC;
9178 break;
9179 case 30:
9180 val |= PIPECONF_10BPC;
9181 break;
9182 case 36:
9183 val |= PIPECONF_12BPC;
9184 break;
9185 default:
9186 /* Case prevented by intel_choose_pipe_bpp_dither. */
9187 BUG();
9188 }
9189
9190 if (crtc_state->dither)
9191 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9192
9193 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9194 val |= PIPECONF_INTERLACED_ILK;
9195 else
9196 val |= PIPECONF_PROGRESSIVE;
9197
9198 if (crtc_state->limited_color_range)
9199 val |= PIPECONF_COLOR_RANGE_SELECT;
9200
9201 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9202
9203 I915_WRITE(PIPECONF(pipe), val);
9204 POSTING_READ(PIPECONF(pipe));
9205 }
9206
9207 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9208 {
9209 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9210 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9211 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9212 u32 val = 0;
9213
9214 if (IS_HASWELL(dev_priv) && crtc_state->dither)
9215 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9216
9217 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9218 val |= PIPECONF_INTERLACED_ILK;
9219 else
9220 val |= PIPECONF_PROGRESSIVE;
9221
9222 I915_WRITE(PIPECONF(cpu_transcoder), val);
9223 POSTING_READ(PIPECONF(cpu_transcoder));
9224 }
9225
9226 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9227 {
9228 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9229 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9230 u32 val = 0;
9231
9232 switch (crtc_state->pipe_bpp) {
9233 case 18:
9234 val |= PIPEMISC_DITHER_6_BPC;
9235 break;
9236 case 24:
9237 val |= PIPEMISC_DITHER_8_BPC;
9238 break;
9239 case 30:
9240 val |= PIPEMISC_DITHER_10_BPC;
9241 break;
9242 case 36:
9243 val |= PIPEMISC_DITHER_12_BPC;
9244 break;
9245 default:
9246 MISSING_CASE(crtc_state->pipe_bpp);
9247 break;
9248 }
9249
9250 if (crtc_state->dither)
9251 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9252
9253 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9254 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9255 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9256
9257 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9258 val |= PIPEMISC_YUV420_ENABLE |
9259 PIPEMISC_YUV420_MODE_FULL_BLEND;
9260
9261 if (INTEL_GEN(dev_priv) >= 11 &&
9262 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9263 BIT(PLANE_CURSOR))) == 0)
9264 val |= PIPEMISC_HDR_MODE_PRECISION;
9265
9266 I915_WRITE(PIPEMISC(crtc->pipe), val);
9267 }
9268
9269 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9270 {
9271 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9272 u32 tmp;
9273
9274 tmp = I915_READ(PIPEMISC(crtc->pipe));
9275
9276 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9277 case PIPEMISC_DITHER_6_BPC:
9278 return 18;
9279 case PIPEMISC_DITHER_8_BPC:
9280 return 24;
9281 case PIPEMISC_DITHER_10_BPC:
9282 return 30;
9283 case PIPEMISC_DITHER_12_BPC:
9284 return 36;
9285 default:
9286 MISSING_CASE(tmp);
9287 return 0;
9288 }
9289 }
9290
9291 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9292 {
9293 /*
9294 * Account for spread spectrum to avoid
9295 * oversubscribing the link. Max center spread
9296 * is 2.5%; use 5% for safety's sake.
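 *
 * Worked example (illustrative): a 148500 kHz mode at 24 bpp on a
 * 270000 kHz FDI link gives bps = 148500 * 24 * 21 / 20 = 3742200,
 * and DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.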
9297 */
9298 u32 bps = target_clock * bpp * 21 / 20;
9299 return DIV_ROUND_UP(bps, link_bw * 8);
9300 }
9301
9302 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9303 {
9304 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9305 }
9306
9307 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9308 struct intel_crtc_state *crtc_state,
9309 struct dpll *reduced_clock)
9310 {
9311 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9312 u32 dpll, fp, fp2;
9313 int factor;
9314
9315 /* Enable autotuning of the PLL clock (if permissible) */
9316 factor = 21;
9317 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9318 if ((intel_panel_use_ssc(dev_priv) &&
9319 dev_priv->vbt.lvds_ssc_freq == 100000) ||
9320 (HAS_PCH_IBX(dev_priv) &&
9321 intel_is_dual_link_lvds(dev_priv)))
9322 factor = 25;
9323 } else if (crtc_state->sdvo_tv_clock) {
9324 factor = 20;
9325 }
9326
9327 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9328
9329 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9330 fp |= FP_CB_TUNE;
9331
9332 if (reduced_clock) {
9333 fp2 = i9xx_dpll_compute_fp(reduced_clock);
9334
9335 if (reduced_clock->m < factor * reduced_clock->n)
9336 fp2 |= FP_CB_TUNE;
9337 } else {
9338 fp2 = fp;
9339 }
9340
9341 dpll = 0;
9342
9343 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9344 dpll |= DPLLB_MODE_LVDS;
9345 else
9346 dpll |= DPLLB_MODE_DAC_SERIAL;
9347
9348 dpll |= (crtc_state->pixel_multiplier - 1)
9349 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9350
9351 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9352 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9353 dpll |= DPLL_SDVO_HIGH_SPEED;
9354
9355 if (intel_crtc_has_dp_encoder(crtc_state))
9356 dpll |= DPLL_SDVO_HIGH_SPEED;
9357
9358 /*
9359 * The high speed IO clock is only really required for
9360 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9361 * possible to share the DPLL between CRT and HDMI. Enabling
9362 * the clock needlessly does no real harm, except use up a
9363 * bit of power potentially.
9364 *
9365 * We'll limit this to IVB with 3 pipes, since it has only two
9366 * DPLLs and so DPLL sharing is the only way to get three pipes
9367 * driving PCH ports at the same time. On SNB we could do this,
9368 * and potentially avoid enabling the second DPLL, but it's not
9369 * clear if it's a win or loss power-wise. No point in doing
9370 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9371 */
9372 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9373 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9374 dpll |= DPLL_SDVO_HIGH_SPEED;
9375
9376 /* compute bitmask from p1 value */
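/* e.g. p1 == 3 yields (1 << 2), i.e. bit 2 of the P1 post divider field */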
9377 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9378 /* also FPA1 */
9379 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9380
9381 switch (crtc_state->dpll.p2) {
9382 case 5:
9383 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9384 break;
9385 case 7:
9386 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9387 break;
9388 case 10:
9389 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9390 break;
9391 case 14:
9392 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9393 break;
9394 }
9395
9396 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9397 intel_panel_use_ssc(dev_priv))
9398 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9399 else
9400 dpll |= PLL_REF_INPUT_DREFCLK;
9401
9402 dpll |= DPLL_VCO_ENABLE;
9403
9404 crtc_state->dpll_hw_state.dpll = dpll;
9405 crtc_state->dpll_hw_state.fp0 = fp;
9406 crtc_state->dpll_hw_state.fp1 = fp2;
9407 }
9408
9409 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9410 struct intel_crtc_state *crtc_state)
9411 {
9412 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9413 const struct intel_limit *limit;
9414 int refclk = 120000;
9415
9416 memset(&crtc_state->dpll_hw_state, 0,
9417 sizeof(crtc_state->dpll_hw_state));
9418
9419 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9420 if (!crtc_state->has_pch_encoder)
9421 return 0;
9422
9423 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9424 if (intel_panel_use_ssc(dev_priv)) {
9425 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9426 dev_priv->vbt.lvds_ssc_freq);
9427 refclk = dev_priv->vbt.lvds_ssc_freq;
9428 }
9429
9430 if (intel_is_dual_link_lvds(dev_priv)) {
9431 if (refclk == 100000)
9432 limit = &intel_limits_ironlake_dual_lvds_100m;
9433 else
9434 limit = &intel_limits_ironlake_dual_lvds;
9435 } else {
9436 if (refclk == 100000)
9437 limit = &intel_limits_ironlake_single_lvds_100m;
9438 else
9439 limit = &intel_limits_ironlake_single_lvds;
9440 }
9441 } else {
9442 limit = &intel_limits_ironlake_dac;
9443 }
9444
9445 if (!crtc_state->clock_set &&
9446 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9447 refclk, NULL, &crtc_state->dpll)) {
9448 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9449 return -EINVAL;
9450 }
9451
9452 ironlake_compute_dpll(crtc, crtc_state, NULL);
9453
9454 if (!intel_get_shared_dpll(crtc_state, NULL)) {
9455 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9456 pipe_name(crtc->pipe));
9457 return -EINVAL;
9458 }
9459
9460 return 0;
9461 }
9462
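/*
 * Note: the hardware stores the TU size field as (TU size - 1),
 * hence the "+ 1" when reading it back.
 */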
9463 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9464 struct intel_link_m_n *m_n)
9465 {
9466 struct drm_device *dev = crtc->base.dev;
9467 struct drm_i915_private *dev_priv = to_i915(dev);
9468 enum pipe pipe = crtc->pipe;
9469
9470 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9471 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9472 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9473 & ~TU_SIZE_MASK;
9474 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9475 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9476 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9477 }
9478
9479 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9480 enum transcoder transcoder,
9481 struct intel_link_m_n *m_n,
9482 struct intel_link_m_n *m2_n2)
9483 {
9484 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9485 enum pipe pipe = crtc->pipe;
9486
9487 if (INTEL_GEN(dev_priv) >= 5) {
9488 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9489 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9490 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9491 & ~TU_SIZE_MASK;
9492 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9493 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9494 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9495
9496 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9497 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9498 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9499 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9500 & ~TU_SIZE_MASK;
9501 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9502 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9503 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9504 }
9505 } else {
9506 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9507 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9508 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9509 & ~TU_SIZE_MASK;
9510 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9511 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9512 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9513 }
9514 }
9515
9516 void intel_dp_get_m_n(struct intel_crtc *crtc,
9517 struct intel_crtc_state *pipe_config)
9518 {
9519 if (pipe_config->has_pch_encoder)
9520 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9521 else
9522 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9523 &pipe_config->dp_m_n,
9524 &pipe_config->dp_m2_n2);
9525 }
9526
9527 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9528 struct intel_crtc_state *pipe_config)
9529 {
9530 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9531 &pipe_config->fdi_m_n, NULL);
9532 }
9533
9534 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9535 struct intel_crtc_state *pipe_config)
9536 {
9537 struct drm_device *dev = crtc->base.dev;
9538 struct drm_i915_private *dev_priv = to_i915(dev);
9539 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9540 u32 ps_ctrl = 0;
9541 int id = -1;
9542 int i;
9543
9544 /* find scaler attached to this pipe */
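/* a scaler with PS_PLANE_SEL clear is bound to the pipe, not a plane */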
9545 for (i = 0; i < crtc->num_scalers; i++) {
9546 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9547 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9548 id = i;
9549 pipe_config->pch_pfit.enabled = true;
9550 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9551 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9552 scaler_state->scalers[i].in_use = true;
9553 break;
9554 }
9555 }
9556
9557 scaler_state->scaler_id = id;
9558 if (id >= 0) {
9559 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9560 } else {
9561 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9562 }
9563 }
9564
9565 static void
9566 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9567 struct intel_initial_plane_config *plane_config)
9568 {
9569 struct drm_device *dev = crtc->base.dev;
9570 struct drm_i915_private *dev_priv = to_i915(dev);
9571 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9572 enum plane_id plane_id = plane->id;
9573 enum pipe pipe;
9574 u32 val, base, offset, stride_mult, tiling, alpha;
9575 int fourcc, pixel_format;
9576 unsigned int aligned_height;
9577 struct drm_framebuffer *fb;
9578 struct intel_framebuffer *intel_fb;
9579
9580 if (!plane->get_hw_state(plane, &pipe))
9581 return;
9582
9583 WARN_ON(pipe != crtc->pipe);
9584
9585 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9586 if (!intel_fb) {
9587 DRM_DEBUG_KMS("failed to alloc fb\n");
9588 return;
9589 }
9590
9591 fb = &intel_fb->base;
9592
9593 fb->dev = dev;
9594
9595 val = I915_READ(PLANE_CTL(pipe, plane_id));
9596
9597 if (INTEL_GEN(dev_priv) >= 11)
9598 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9599 else
9600 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9601
9602 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9603 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9604 alpha &= PLANE_COLOR_ALPHA_MASK;
9605 } else {
9606 alpha = val & PLANE_CTL_ALPHA_MASK;
9607 }
9608
9609 fourcc = skl_format_to_fourcc(pixel_format,
9610 val & PLANE_CTL_ORDER_RGBX, alpha);
9611 fb->format = drm_format_info(fourcc);
9612
9613 tiling = val & PLANE_CTL_TILED_MASK;
9614 switch (tiling) {
9615 case PLANE_CTL_TILED_LINEAR:
9616 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9617 break;
9618 case PLANE_CTL_TILED_X:
9619 plane_config->tiling = I915_TILING_X;
9620 fb->modifier = I915_FORMAT_MOD_X_TILED;
9621 break;
9622 case PLANE_CTL_TILED_Y:
9623 plane_config->tiling = I915_TILING_Y;
9624 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9625 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9626 else
9627 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9628 break;
9629 case PLANE_CTL_TILED_YF:
9630 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9631 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9632 else
9633 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9634 break;
9635 default:
9636 MISSING_CASE(tiling);
9637 goto error;
9638 }
9639
9640 /*
9641 * DRM_MODE_ROTATE_* is counter-clockwise, to stay compatible with Xrandr,
9642 * while i915 HW rotation is clockwise; that's why the values are swapped here.
9643 */
9644 switch (val & PLANE_CTL_ROTATE_MASK) {
9645 case PLANE_CTL_ROTATE_0:
9646 plane_config->rotation = DRM_MODE_ROTATE_0;
9647 break;
9648 case PLANE_CTL_ROTATE_90:
9649 plane_config->rotation = DRM_MODE_ROTATE_270;
9650 break;
9651 case PLANE_CTL_ROTATE_180:
9652 plane_config->rotation = DRM_MODE_ROTATE_180;
9653 break;
9654 case PLANE_CTL_ROTATE_270:
9655 plane_config->rotation = DRM_MODE_ROTATE_90;
9656 break;
9657 }
9658
9659 if (INTEL_GEN(dev_priv) >= 10 &&
9660 val & PLANE_CTL_FLIP_HORIZONTAL)
9661 plane_config->rotation |= DRM_MODE_REFLECT_X;
9662
9663 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9664 plane_config->base = base;
9665
9666 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9667
9668 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9669 fb->height = ((val >> 16) & 0xfff) + 1;
9670 fb->width = ((val >> 0) & 0x1fff) + 1;
9671
9672 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9673 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9674 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9675
9676 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9677
9678 plane_config->size = fb->pitches[0] * aligned_height;
9679
9680 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9681 crtc->base.name, plane->base.name, fb->width, fb->height,
9682 fb->format->cpp[0] * 8, base, fb->pitches[0],
9683 plane_config->size);
9684
9685 plane_config->fb = intel_fb;
9686 return;
9687
9688 error:
9689 kfree(intel_fb);
9690 }
9691
9692 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9693 struct intel_crtc_state *pipe_config)
9694 {
9695 struct drm_device *dev = crtc->base.dev;
9696 struct drm_i915_private *dev_priv = to_i915(dev);
9697 u32 tmp;
9698
9699 tmp = I915_READ(PF_CTL(crtc->pipe));
9700
9701 if (tmp & PF_ENABLE) {
9702 pipe_config->pch_pfit.enabled = true;
9703 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9704 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9705
9706 /* We currently do not free assignments of panel fitters on
9707 * ivb/hsw (since we don't use the higher upscaling modes which
9708 * differentiate them) so just WARN about this case for now. */
9709 if (IS_GEN(dev_priv, 7)) {
9710 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9711 PF_PIPE_SEL_IVB(crtc->pipe));
9712 }
9713 }
9714 }
9715
9716 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9717 struct intel_crtc_state *pipe_config)
9718 {
9719 struct drm_device *dev = crtc->base.dev;
9720 struct drm_i915_private *dev_priv = to_i915(dev);
9721 enum intel_display_power_domain power_domain;
9722 intel_wakeref_t wakeref;
9723 u32 tmp;
9724 bool ret;
9725
9726 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9727 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9728 if (!wakeref)
9729 return false;
9730
9731 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9732 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9733 pipe_config->shared_dpll = NULL;
9734
9735 ret = false;
9736 tmp = I915_READ(PIPECONF(crtc->pipe));
9737 if (!(tmp & PIPECONF_ENABLE))
9738 goto out;
9739
9740 switch (tmp & PIPECONF_BPC_MASK) {
9741 case PIPECONF_6BPC:
9742 pipe_config->pipe_bpp = 18;
9743 break;
9744 case PIPECONF_8BPC:
9745 pipe_config->pipe_bpp = 24;
9746 break;
9747 case PIPECONF_10BPC:
9748 pipe_config->pipe_bpp = 30;
9749 break;
9750 case PIPECONF_12BPC:
9751 pipe_config->pipe_bpp = 36;
9752 break;
9753 default:
9754 break;
9755 }
9756
9757 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9758 pipe_config->limited_color_range = true;
9759
9760 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9761 PIPECONF_GAMMA_MODE_SHIFT;
9762
9763 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9764
9765 i9xx_get_pipe_color_config(pipe_config);
9766 intel_color_get_config(pipe_config);
9767
9768 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9769 struct intel_shared_dpll *pll;
9770 enum intel_dpll_id pll_id;
9771
9772 pipe_config->has_pch_encoder = true;
9773
9774 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9775 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9776 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9777
9778 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9779
9780 if (HAS_PCH_IBX(dev_priv)) {
9781 /*
9782 * The pipe->pch transcoder and pch transcoder->pll
9783 * mapping is fixed.
9784 */
9785 pll_id = (enum intel_dpll_id) crtc->pipe;
9786 } else {
9787 tmp = I915_READ(PCH_DPLL_SEL);
9788 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9789 pll_id = DPLL_ID_PCH_PLL_B;
9790 else
9791 pll_id = DPLL_ID_PCH_PLL_A;
9792 }
9793
9794 pipe_config->shared_dpll =
9795 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9796 pll = pipe_config->shared_dpll;
9797
9798 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9799 &pipe_config->dpll_hw_state));
9800
9801 tmp = pipe_config->dpll_hw_state.dpll;
9802 pipe_config->pixel_multiplier =
9803 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9804 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9805
9806 ironlake_pch_clock_get(crtc, pipe_config);
9807 } else {
9808 pipe_config->pixel_multiplier = 1;
9809 }
9810
9811 intel_get_pipe_timings(crtc, pipe_config);
9812 intel_get_pipe_src_size(crtc, pipe_config);
9813
9814 ironlake_get_pfit_config(crtc, pipe_config);
9815
9816 ret = true;
9817
9818 out:
9819 intel_display_power_put(dev_priv, power_domain, wakeref);
9820
9821 return ret;
9822 }

9823 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9824 struct intel_crtc_state *crtc_state)
9825 {
9826 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9827 struct intel_atomic_state *state =
9828 to_intel_atomic_state(crtc_state->base.state);
9829
9830 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9831 INTEL_GEN(dev_priv) >= 11) {
9832 struct intel_encoder *encoder =
9833 intel_get_crtc_new_encoder(state, crtc_state);
9834
9835 if (!intel_get_shared_dpll(crtc_state, encoder)) {
9836 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9837 pipe_name(crtc->pipe));
9838 return -EINVAL;
9839 }
9840 }
9841
9842 return 0;
9843 }
9844
9845 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9846 enum port port,
9847 struct intel_crtc_state *pipe_config)
9848 {
9849 enum intel_dpll_id id;
9850 u32 temp;
9851
9852 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9853 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9854
9855 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9856 return;
9857
9858 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9859 }
9860
9861 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9862 enum port port,
9863 struct intel_crtc_state *pipe_config)
9864 {
9865 enum intel_dpll_id id;
9866 u32 temp;
9867
9868 /* TODO: TBT pll not implemented. */
9869 if (intel_port_is_combophy(dev_priv, port)) {
9870 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9871 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9872 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9873 } else if (intel_port_is_tc(dev_priv, port)) {
9874 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9875 } else {
9876 WARN(1, "Invalid port %x\n", port);
9877 return;
9878 }
9879
9880 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9881 }
9882
9883 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9884 enum port port,
9885 struct intel_crtc_state *pipe_config)
9886 {
9887 enum intel_dpll_id id;
9888
9889 switch (port) {
9890 case PORT_A:
9891 id = DPLL_ID_SKL_DPLL0;
9892 break;
9893 case PORT_B:
9894 id = DPLL_ID_SKL_DPLL1;
9895 break;
9896 case PORT_C:
9897 id = DPLL_ID_SKL_DPLL2;
9898 break;
9899 default:
9900 DRM_ERROR("Incorrect port type\n");
9901 return;
9902 }
9903
9904 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9905 }
9906
9907 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9908 enum port port,
9909 struct intel_crtc_state *pipe_config)
9910 {
9911 enum intel_dpll_id id;
9912 u32 temp;
9913
9914 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9915 id = temp >> (port * 3 + 1);
9916
9917 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9918 return;
9919
9920 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9921 }
9922
9923 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9924 enum port port,
9925 struct intel_crtc_state *pipe_config)
9926 {
9927 enum intel_dpll_id id;
9928 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9929
9930 switch (ddi_pll_sel) {
9931 case PORT_CLK_SEL_WRPLL1:
9932 id = DPLL_ID_WRPLL1;
9933 break;
9934 case PORT_CLK_SEL_WRPLL2:
9935 id = DPLL_ID_WRPLL2;
9936 break;
9937 case PORT_CLK_SEL_SPLL:
9938 id = DPLL_ID_SPLL;
9939 break;
9940 case PORT_CLK_SEL_LCPLL_810:
9941 id = DPLL_ID_LCPLL_810;
9942 break;
9943 case PORT_CLK_SEL_LCPLL_1350:
9944 id = DPLL_ID_LCPLL_1350;
9945 break;
9946 case PORT_CLK_SEL_LCPLL_2700:
9947 id = DPLL_ID_LCPLL_2700;
9948 break;
9949 default:
9950 MISSING_CASE(ddi_pll_sel);
9951 /* fall through */
9952 case PORT_CLK_SEL_NONE:
9953 return;
9954 }
9955
9956 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9957 }
9958
9959 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9960 struct intel_crtc_state *pipe_config,
9961 u64 *power_domain_mask,
9962 intel_wakeref_t *wakerefs)
9963 {
9964 struct drm_device *dev = crtc->base.dev;
9965 struct drm_i915_private *dev_priv = to_i915(dev);
9966 enum intel_display_power_domain power_domain;
9967 unsigned long panel_transcoder_mask = 0;
9968 unsigned long enabled_panel_transcoders = 0;
9969 enum transcoder panel_transcoder;
9970 intel_wakeref_t wf;
9971 u32 tmp;
9972
9973 if (INTEL_GEN(dev_priv) >= 11)
9974 panel_transcoder_mask |=
9975 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
9976
9977 if (HAS_TRANSCODER_EDP(dev_priv))
9978 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
9979
9980 /*
9981 * The pipe->transcoder mapping is fixed with the exception of the eDP
9982 * and DSI transcoders handled below.
9983 */
9984 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9985
9986 /*
9987 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9988 * consistency and less surprising code; it's in always on power).
9989 */
9990 for_each_set_bit(panel_transcoder,
9991 &panel_transcoder_mask,
9992 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
9993 bool force_thru = false;
9994 enum pipe trans_pipe;
9995
9996 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9997 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9998 continue;
9999
10000 /*
10001 * Log all enabled ones, only use the first one.
10002 *
10003 * FIXME: This won't work for two separate DSI displays.
10004 */
10005 enabled_panel_transcoders |= BIT(panel_transcoder);
10006 if (enabled_panel_transcoders != BIT(panel_transcoder))
10007 continue;
10008
10009 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10010 default:
10011 WARN(1, "unknown pipe linked to transcoder %s\n",
10012 transcoder_name(panel_transcoder));
10013 /* fall through */
10014 case TRANS_DDI_EDP_INPUT_A_ONOFF:
10015 force_thru = true;
10016 /* fall through */
10017 case TRANS_DDI_EDP_INPUT_A_ON:
10018 trans_pipe = PIPE_A;
10019 break;
10020 case TRANS_DDI_EDP_INPUT_B_ONOFF:
10021 trans_pipe = PIPE_B;
10022 break;
10023 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10024 trans_pipe = PIPE_C;
10025 break;
10026 }
10027
10028 if (trans_pipe == crtc->pipe) {
10029 pipe_config->cpu_transcoder = panel_transcoder;
10030 pipe_config->pch_pfit.force_thru = force_thru;
10031 }
10032 }
10033
10034 /*
10035 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10036 */
10037 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10038 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10039
10040 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10041 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10042
10043 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10044 if (!wf)
10045 return false;
10046
10047 wakerefs[power_domain] = wf;
10048 *power_domain_mask |= BIT_ULL(power_domain);
10049
10050 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10051
10052 return tmp & PIPECONF_ENABLE;
10053 }
10054
10055 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10056 struct intel_crtc_state *pipe_config,
10057 u64 *power_domain_mask,
10058 intel_wakeref_t *wakerefs)
10059 {
10060 struct drm_device *dev = crtc->base.dev;
10061 struct drm_i915_private *dev_priv = to_i915(dev);
10062 enum intel_display_power_domain power_domain;
10063 enum transcoder cpu_transcoder;
10064 intel_wakeref_t wf;
10065 enum port port;
10066 u32 tmp;
10067
10068 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10069 if (port == PORT_A)
10070 cpu_transcoder = TRANSCODER_DSI_A;
10071 else
10072 cpu_transcoder = TRANSCODER_DSI_C;
10073
10074 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10075 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10076
10077 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10078 if (!wf)
10079 continue;
10080
10081 wakerefs[power_domain] = wf;
10082 *power_domain_mask |= BIT_ULL(power_domain);
10083
10084 /*
10085 * The PLL needs to be enabled with a valid divider
10086 * configuration, otherwise accessing DSI registers will hang
10087 * the machine. See BSpec North Display Engine
10088 * registers/MIPI[BXT]. We can break out here early, since we
10089 * need the same DSI PLL to be enabled for both DSI ports.
10090 */
10091 if (!bxt_dsi_pll_is_enabled(dev_priv))
10092 break;
10093
10094 /* XXX: this works for video mode only */
10095 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10096 if (!(tmp & DPI_ENABLE))
10097 continue;
10098
10099 tmp = I915_READ(MIPI_CTRL(port));
10100 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10101 continue;
10102
10103 pipe_config->cpu_transcoder = cpu_transcoder;
10104 break;
10105 }
10106
10107 return transcoder_is_dsi(pipe_config->cpu_transcoder);
10108 }
10109
10110 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10111 struct intel_crtc_state *pipe_config)
10112 {
10113 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10114 struct intel_shared_dpll *pll;
10115 enum port port;
10116 u32 tmp;
10117
10118 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10119
10120 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
10121
10122 if (INTEL_GEN(dev_priv) >= 11)
10123 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10124 else if (IS_CANNONLAKE(dev_priv))
10125 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10126 else if (IS_GEN9_BC(dev_priv))
10127 skylake_get_ddi_pll(dev_priv, port, pipe_config);
10128 else if (IS_GEN9_LP(dev_priv))
10129 bxt_get_ddi_pll(dev_priv, port, pipe_config);
10130 else
10131 haswell_get_ddi_pll(dev_priv, port, pipe_config);
10132
10133 pll = pipe_config->shared_dpll;
10134 if (pll) {
10135 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10136 &pipe_config->dpll_hw_state));
10137 }
10138
10139 /*
10140 * Haswell has only FDI/PCH transcoder A, which is connected to
10141 * DDI E. So just check whether this pipe is wired to DDI E and whether
10142 * the PCH transcoder is on.
10143 */
10144 if (INTEL_GEN(dev_priv) < 9 &&
10145 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10146 pipe_config->has_pch_encoder = true;
10147
10148 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10149 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10150 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10151
10152 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10153 }
10154 }
10155
10156 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10157 struct intel_crtc_state *pipe_config)
10158 {
10159 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10160 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10161 enum intel_display_power_domain power_domain;
10162 u64 power_domain_mask;
10163 bool active;
10164
10165 intel_crtc_init_scalers(crtc, pipe_config);
10166
10167 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10168 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10169 if (!wf)
10170 return false;
10171
10172 wakerefs[power_domain] = wf;
10173 power_domain_mask = BIT_ULL(power_domain);
10174
10175 pipe_config->shared_dpll = NULL;
10176
10177 active = hsw_get_transcoder_state(crtc, pipe_config,
10178 &power_domain_mask, wakerefs);
10179
10180 if (IS_GEN9_LP(dev_priv) &&
10181 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10182 &power_domain_mask, wakerefs)) {
10183 WARN_ON(active);
10184 active = true;
10185 }
10186
10187 if (!active)
10188 goto out;
10189
10190 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10191 INTEL_GEN(dev_priv) >= 11) {
10192 haswell_get_ddi_port_state(crtc, pipe_config);
10193 intel_get_pipe_timings(crtc, pipe_config);
10194 }
10195
10196 intel_get_pipe_src_size(crtc, pipe_config);
10197 intel_get_crtc_ycbcr_config(crtc, pipe_config);
10198
10199 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10200
10201 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10202
10203 if (INTEL_GEN(dev_priv) >= 9) {
10204 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10205
10206 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10207 pipe_config->gamma_enable = true;
10208
10209 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10210 pipe_config->csc_enable = true;
10211 } else {
10212 i9xx_get_pipe_color_config(pipe_config);
10213 }
10214
10215 intel_color_get_config(pipe_config);
10216
10217 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10218 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10219
10220 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10221 if (wf) {
10222 wakerefs[power_domain] = wf;
10223 power_domain_mask |= BIT_ULL(power_domain);
10224
10225 if (INTEL_GEN(dev_priv) >= 9)
10226 skylake_get_pfit_config(crtc, pipe_config);
10227 else
10228 ironlake_get_pfit_config(crtc, pipe_config);
10229 }
10230
10231 if (hsw_crtc_supports_ips(crtc)) {
10232 if (IS_HASWELL(dev_priv))
10233 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10234 else {
10235 /*
10236 * We cannot readout IPS state on broadwell, set to
10237 * true so we can set it to a defined state on first
10238 * commit.
10239 */
10240 pipe_config->ips_enabled = true;
10241 }
10242 }
10243
10244 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10245 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10246 pipe_config->pixel_multiplier =
10247 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10248 } else {
10249 pipe_config->pixel_multiplier = 1;
10250 }
10251
10252 out:
10253 for_each_power_domain(power_domain, power_domain_mask)
10254 intel_display_power_put(dev_priv,
10255 power_domain, wakerefs[power_domain]);
10256
10257 return active;
10258 }
10259
10260 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10261 {
10262 struct drm_i915_private *dev_priv =
10263 to_i915(plane_state->base.plane->dev);
10264 const struct drm_framebuffer *fb = plane_state->base.fb;
10265 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10266 u32 base;
10267
10268 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10269 base = obj->phys_handle->busaddr;
10270 else
10271 base = intel_plane_ggtt_offset(plane_state);
10272
10273 base += plane_state->color_plane[0].offset;
10274
10275 /* ILK+ do this automagically */
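/*
 * With 180 degree rotation GMCH scans the cursor out backwards, so
 * base must point at the last pixel: e.g. a 64x64 ARGB cursor adds
 * (64 * 64 - 1) * 4 bytes.
 */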
10276 if (HAS_GMCH(dev_priv) &&
10277 plane_state->base.rotation & DRM_MODE_ROTATE_180)
10278 base += (plane_state->base.crtc_h *
10279 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10280
10281 return base;
10282 }
10283
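/*
 * CURPOS holds sign-magnitude coordinates: e.g. crtc_x = -16 is encoded
 * as (CURSOR_POS_SIGN | 16) << CURSOR_X_SHIFT.
 */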
10284 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10285 {
10286 int x = plane_state->base.crtc_x;
10287 int y = plane_state->base.crtc_y;
10288 u32 pos = 0;
10289
10290 if (x < 0) {
10291 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10292 x = -x;
10293 }
10294 pos |= x << CURSOR_X_SHIFT;
10295
10296 if (y < 0) {
10297 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10298 y = -y;
10299 }
10300 pos |= y << CURSOR_Y_SHIFT;
10301
10302 return pos;
10303 }
10304
10305 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10306 {
10307 const struct drm_mode_config *config =
10308 &plane_state->base.plane->dev->mode_config;
10309 int width = plane_state->base.crtc_w;
10310 int height = plane_state->base.crtc_h;
10311
10312 return width > 0 && width <= config->cursor_width &&
10313 height > 0 && height <= config->cursor_height;
10314 }
10315
10316 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10317 {
10318 int src_x, src_y;
10319 u32 offset;
10320 int ret;
10321
10322 ret = intel_plane_compute_gtt(plane_state);
10323 if (ret)
10324 return ret;
10325
10326 if (!plane_state->base.visible)
10327 return 0;
10328
10329 src_x = plane_state->base.src_x >> 16;
10330 src_y = plane_state->base.src_y >> 16;
10331
10332 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10333 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10334 plane_state, 0);
10335
10336 if (src_x != 0 || src_y != 0) {
10337 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10338 return -EINVAL;
10339 }
10340
10341 plane_state->color_plane[0].offset = offset;
10342
10343 return 0;
10344 }
10345
10346 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10347 struct intel_plane_state *plane_state)
10348 {
10349 const struct drm_framebuffer *fb = plane_state->base.fb;
10350 int ret;
10351
10352 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10353 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10354 return -EINVAL;
10355 }
10356
10357 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10358 &crtc_state->base,
10359 DRM_PLANE_HELPER_NO_SCALING,
10360 DRM_PLANE_HELPER_NO_SCALING,
10361 true, true);
10362 if (ret)
10363 return ret;
10364
10365 ret = intel_cursor_check_surface(plane_state);
10366 if (ret)
10367 return ret;
10368
10369 if (!plane_state->base.visible)
10370 return 0;
10371
10372 ret = intel_plane_check_src_coordinates(plane_state);
10373 if (ret)
10374 return ret;
10375
10376 return 0;
10377 }
10378
10379 static unsigned int
10380 i845_cursor_max_stride(struct intel_plane *plane,
10381 u32 pixel_format, u64 modifier,
10382 unsigned int rotation)
10383 {
10384 return 2048;
10385 }
10386
10387 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10388 {
10389 u32 cntl = 0;
10390
10391 if (crtc_state->gamma_enable)
10392 cntl |= CURSOR_GAMMA_ENABLE;
10393
10394 return cntl;
10395 }
10396
10397 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10398 const struct intel_plane_state *plane_state)
10399 {
10400 return CURSOR_ENABLE |
10401 CURSOR_FORMAT_ARGB |
10402 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10403 }
10404
10405 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10406 {
10407 int width = plane_state->base.crtc_w;
10408
10409 /*
10410 * 845g/865g are only limited by the width of their cursors;
10411 * the height is arbitrary up to the precision of the register.
10412 */
10413 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10414 }
10415
10416 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10417 struct intel_plane_state *plane_state)
10418 {
10419 const struct drm_framebuffer *fb = plane_state->base.fb;
10420 int ret;
10421
10422 ret = intel_check_cursor(crtc_state, plane_state);
10423 if (ret)
10424 return ret;
10425
10426 /* if we want to turn off the cursor ignore width and height */
10427 if (!fb)
10428 return 0;
10429
10430 /* Check for which cursor types we support */
10431 if (!i845_cursor_size_ok(plane_state)) {
10432 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10433 plane_state->base.crtc_w,
10434 plane_state->base.crtc_h);
10435 return -EINVAL;
10436 }
10437
10438 WARN_ON(plane_state->base.visible &&
10439 plane_state->color_plane[0].stride != fb->pitches[0]);
10440
10441 switch (fb->pitches[0]) {
10442 case 256:
10443 case 512:
10444 case 1024:
10445 case 2048:
10446 break;
10447 default:
10448 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10449 fb->pitches[0]);
10450 return -EINVAL;
10451 }
10452
10453 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10454
10455 return 0;
10456 }
10457
10458 static void i845_update_cursor(struct intel_plane *plane,
10459 const struct intel_crtc_state *crtc_state,
10460 const struct intel_plane_state *plane_state)
10461 {
10462 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10463 u32 cntl = 0, base = 0, pos = 0, size = 0;
10464 unsigned long irqflags;
10465
10466 if (plane_state && plane_state->base.visible) {
10467 unsigned int width = plane_state->base.crtc_w;
10468 unsigned int height = plane_state->base.crtc_h;
10469
10470 cntl = plane_state->ctl |
10471 i845_cursor_ctl_crtc(crtc_state);
10472
10473 size = (height << 12) | width;
10474
10475 base = intel_cursor_base(plane_state);
10476 pos = intel_cursor_position(plane_state);
10477 }
10478
10479 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10480
10481 /* On these chipsets we can only modify the base/size/stride
10482 * whilst the cursor is disabled.
10483 */
10484 if (plane->cursor.base != base ||
10485 plane->cursor.size != size ||
10486 plane->cursor.cntl != cntl) {
10487 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10488 I915_WRITE_FW(CURBASE(PIPE_A), base);
10489 I915_WRITE_FW(CURSIZE, size);
10490 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10491 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10492
10493 plane->cursor.base = base;
10494 plane->cursor.size = size;
10495 plane->cursor.cntl = cntl;
10496 } else {
10497 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10498 }
10499
10500 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10501 }
10502
10503 static void i845_disable_cursor(struct intel_plane *plane,
10504 const struct intel_crtc_state *crtc_state)
10505 {
10506 i845_update_cursor(plane, crtc_state, NULL);
10507 }
10508
10509 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10510 enum pipe *pipe)
10511 {
10512 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10513 enum intel_display_power_domain power_domain;
10514 intel_wakeref_t wakeref;
10515 bool ret;
10516
10517 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10518 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10519 if (!wakeref)
10520 return false;
10521
10522 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10523
10524 *pipe = PIPE_A;
10525
10526 intel_display_power_put(dev_priv, power_domain, wakeref);
10527
10528 return ret;
10529 }
10530
10531 static unsigned int
10532 i9xx_cursor_max_stride(struct intel_plane *plane,
10533 u32 pixel_format, u64 modifier,
10534 unsigned int rotation)
10535 {
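/* ARGB8888 only, so max stride is the widest cursor row at 4 bytes/px */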
10536 return plane->base.dev->mode_config.cursor_width * 4;
10537 }
10538
10539 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10540 {
10541 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10542 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10543 u32 cntl = 0;
10544
10545 if (INTEL_GEN(dev_priv) >= 11)
10546 return cntl;
10547
10548 if (crtc_state->gamma_enable)
10549 cntl = MCURSOR_GAMMA_ENABLE;
10550
10551 if (crtc_state->csc_enable)
10552 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10553
10554 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10555 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10556
10557 return cntl;
10558 }
10559
10560 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10561 const struct intel_plane_state *plane_state)
10562 {
10563 struct drm_i915_private *dev_priv =
10564 to_i915(plane_state->base.plane->dev);
10565 u32 cntl = 0;
10566
10567 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10568 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10569
10570 switch (plane_state->base.crtc_w) {
10571 case 64:
10572 cntl |= MCURSOR_MODE_64_ARGB_AX;
10573 break;
10574 case 128:
10575 cntl |= MCURSOR_MODE_128_ARGB_AX;
10576 break;
10577 case 256:
10578 cntl |= MCURSOR_MODE_256_ARGB_AX;
10579 break;
10580 default:
10581 MISSING_CASE(plane_state->base.crtc_w);
10582 return 0;
10583 }
10584
10585 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10586 cntl |= MCURSOR_ROTATE_180;
10587
10588 return cntl;
10589 }
10590
10591 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10592 {
10593 struct drm_i915_private *dev_priv =
10594 to_i915(plane_state->base.plane->dev);
10595 int width = plane_state->base.crtc_w;
10596 int height = plane_state->base.crtc_h;
10597
10598 if (!intel_cursor_size_ok(plane_state))
10599 return false;
10600
10601 /* Cursor width is limited to a few power-of-two sizes */
10602 switch (width) {
10603 case 256:
10604 case 128:
10605 case 64:
10606 break;
10607 default:
10608 return false;
10609 }
10610
10611 /*
10612 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10613 * height from 8 lines up to the cursor width, when the
10614 * cursor is not rotated. Everything else requires square
10615 * cursors.
10616 */
10617 if (HAS_CUR_FBC(dev_priv) &&
10618 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10619 if (height < 8 || height > width)
10620 return false;
10621 } else {
10622 if (height != width)
10623 return false;
10624 }
10625
10626 return true;
10627 }
10628
10629 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10630 struct intel_plane_state *plane_state)
10631 {
10632 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10633 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10634 const struct drm_framebuffer *fb = plane_state->base.fb;
10635 enum pipe pipe = plane->pipe;
10636 int ret;
10637
10638 ret = intel_check_cursor(crtc_state, plane_state);
10639 if (ret)
10640 return ret;
10641
10642 /* if we want to turn off the cursor ignore width and height */
10643 if (!fb)
10644 return 0;
10645
10646 /* Check for which cursor types we support */
10647 if (!i9xx_cursor_size_ok(plane_state)) {
10648 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10649 plane_state->base.crtc_w,
10650 plane_state->base.crtc_h);
10651 return -EINVAL;
10652 }
10653
10654 WARN_ON(plane_state->base.visible &&
10655 plane_state->color_plane[0].stride != fb->pitches[0]);
10656
10657 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10658 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10659 fb->pitches[0], plane_state->base.crtc_w);
10660 return -EINVAL;
10661 }
10662
10663 /*
10664 * There's something wrong with the cursor on CHV pipe C.
10665 * If it straddles the left edge of the screen then
10666 * moving it away from the edge or disabling it often
10667 * results in a pipe underrun, and often that can lead to
10668 * a dead pipe (constant underrun reported, and it scans
10669 * out just a solid color). To recover from that, the
10670 * display power well must be turned off and on again.
10671 * Refuse to put the cursor into that compromised position.
10672 */
10673 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10674 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10675 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10676 return -EINVAL;
10677 }
10678
10679 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10680
10681 return 0;
10682 }
10683
10684 static void i9xx_update_cursor(struct intel_plane *plane,
10685 const struct intel_crtc_state *crtc_state,
10686 const struct intel_plane_state *plane_state)
10687 {
10688 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10689 enum pipe pipe = plane->pipe;
10690 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10691 unsigned long irqflags;
10692
10693 if (plane_state && plane_state->base.visible) {
10694 cntl = plane_state->ctl |
10695 i9xx_cursor_ctl_crtc(crtc_state);
10696
10697 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10698 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10699
10700 base = intel_cursor_base(plane_state);
10701 pos = intel_cursor_position(plane_state);
10702 }
10703
10704 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10705
10706 /*
10707 * On some platforms writing CURCNTR first will also
10708 * cause CURPOS to be armed by the CURBASE write.
10709 * Without the CURCNTR write the CURPOS write would
10710 * arm itself. Thus we always update CURCNTR before
10711 * CURPOS.
10712 *
10713 * On other platforms CURPOS always requires the
10714 * CURBASE write to arm the update. Additionally
10715 * a write to any of the cursor registers will cancel
10716 * an already armed cursor update. Thus leaving out
10717 * the CURBASE write after CURPOS could lead to a
10718 * cursor that doesn't appear to move, or even change
10719 * shape. Thus we always write CURBASE.
10720 *
10721 * The other registers are armed by the CURBASE write
10722 * except when the plane is getting enabled at which time
10723 * the CURCNTR write arms the update.
10724 */
10725
10726 if (INTEL_GEN(dev_priv) >= 9)
10727 skl_write_cursor_wm(plane, crtc_state);
10728
10729 if (plane->cursor.base != base ||
10730 plane->cursor.size != fbc_ctl ||
10731 plane->cursor.cntl != cntl) {
10732 if (HAS_CUR_FBC(dev_priv))
10733 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10734 I915_WRITE_FW(CURCNTR(pipe), cntl);
10735 I915_WRITE_FW(CURPOS(pipe), pos);
10736 I915_WRITE_FW(CURBASE(pipe), base);
10737
10738 plane->cursor.base = base;
10739 plane->cursor.size = fbc_ctl;
10740 plane->cursor.cntl = cntl;
10741 } else {
10742 I915_WRITE_FW(CURPOS(pipe), pos);
10743 I915_WRITE_FW(CURBASE(pipe), base);
10744 }
10745
10746 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10747 }
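/*
 * The arming rules above, reduced to their ordering contract. This is
 * an illustrative sketch with a hypothetical reg_write() helper, not
 * driver code:
 *
 *	reg_write(CUR_FBC_CTL);		if present, before CURCNTR
 *	reg_write(CURCNTR);		must precede CURPOS on some parts
 *	reg_write(CURPOS);
 *	reg_write(CURBASE);		always last: arms the whole update
 *
 * Skipping the final CURBASE write could leave an already-armed update
 * cancelled, which is why even the position-only path above still
 * writes CURBASE.
 */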
10748
10749 static void i9xx_disable_cursor(struct intel_plane *plane,
10750 const struct intel_crtc_state *crtc_state)
10751 {
10752 i9xx_update_cursor(plane, crtc_state, NULL);
10753 }
10754
10755 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10756 enum pipe *pipe)
10757 {
10758 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10759 enum intel_display_power_domain power_domain;
10760 intel_wakeref_t wakeref;
10761 bool ret;
10762 u32 val;
10763
10764 /*
10765 * Not 100% correct for planes that can move between pipes,
10766 * but that's only the case for gen2-3 which don't have any
10767 * display power wells.
10768 */
10769 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10770 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10771 if (!wakeref)
10772 return false;
10773
10774 val = I915_READ(CURCNTR(plane->pipe));
10775
10776 ret = val & MCURSOR_MODE;
10777
10778 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10779 *pipe = plane->pipe;
10780 else
10781 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10782 MCURSOR_PIPE_SELECT_SHIFT;
10783
10784 intel_display_power_put(dev_priv, power_domain, wakeref);
10785
10786 return ret;
10787 }
10788
10789 /* VESA 640x480x72Hz mode to set on the pipe */
10790 static const struct drm_display_mode load_detect_mode = {
10791 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10792 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10793 };
10794
10795 struct drm_framebuffer *
10796 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10797 struct drm_mode_fb_cmd2 *mode_cmd)
10798 {
10799 struct intel_framebuffer *intel_fb;
10800 int ret;
10801
10802 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10803 if (!intel_fb)
10804 return ERR_PTR(-ENOMEM);
10805
10806 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10807 if (ret)
10808 goto err;
10809
10810 return &intel_fb->base;
10811
10812 err:
10813 kfree(intel_fb);
10814 return ERR_PTR(ret);
10815 }
10816
10817 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10818 struct drm_crtc *crtc)
10819 {
10820 struct drm_plane *plane;
10821 struct drm_plane_state *plane_state;
10822 int ret, i;
10823
10824 ret = drm_atomic_add_affected_planes(state, crtc);
10825 if (ret)
10826 return ret;
10827
10828 for_each_new_plane_in_state(state, plane, plane_state, i) {
10829 if (plane_state->crtc != crtc)
10830 continue;
10831
10832 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10833 if (ret)
10834 return ret;
10835
10836 drm_atomic_set_fb_for_plane(plane_state, NULL);
10837 }
10838
10839 return 0;
10840 }
10841
10842 int intel_get_load_detect_pipe(struct drm_connector *connector,
10843 const struct drm_display_mode *mode,
10844 struct intel_load_detect_pipe *old,
10845 struct drm_modeset_acquire_ctx *ctx)
10846 {
10847 struct intel_crtc *intel_crtc;
10848 struct intel_encoder *intel_encoder =
10849 intel_attached_encoder(connector);
10850 struct drm_crtc *possible_crtc;
10851 struct drm_encoder *encoder = &intel_encoder->base;
10852 struct drm_crtc *crtc = NULL;
10853 struct drm_device *dev = encoder->dev;
10854 struct drm_i915_private *dev_priv = to_i915(dev);
10855 struct drm_mode_config *config = &dev->mode_config;
10856 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10857 struct drm_connector_state *connector_state;
10858 struct intel_crtc_state *crtc_state;
10859 int ret, i = -1;
10860
10861 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10862 connector->base.id, connector->name,
10863 encoder->base.id, encoder->name);
10864
10865 old->restore_state = NULL;
10866
10867 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10868
10869 /*
10870 * Algorithm gets a little messy:
10871 *
10872 * - if the connector already has an assigned crtc, use it (but make
10873 * sure it's on first)
10874 *
10875 * - try to find the first unused crtc that can drive this connector,
10876 * and use that if we find one
10877 */
10878
10879 /* See if we already have a CRTC for this connector */
10880 if (connector->state->crtc) {
10881 crtc = connector->state->crtc;
10882
10883 ret = drm_modeset_lock(&crtc->mutex, ctx);
10884 if (ret)
10885 goto fail;
10886
10887 /* Make sure the crtc and connector are running */
10888 goto found;
10889 }
10890
10891 /* Find an unused one (if possible) */
10892 for_each_crtc(dev, possible_crtc) {
10893 i++;
10894 if (!(encoder->possible_crtcs & (1 << i)))
10895 continue;
10896
10897 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10898 if (ret)
10899 goto fail;
10900
10901 if (possible_crtc->state->enable) {
10902 drm_modeset_unlock(&possible_crtc->mutex);
10903 continue;
10904 }
10905
10906 crtc = possible_crtc;
10907 break;
10908 }
10909
10910 /*
10911 * If we didn't find an unused CRTC, don't use any.
10912 */
10913 if (!crtc) {
10914 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10915 ret = -ENODEV;
10916 goto fail;
10917 }
10918
10919 found:
10920 intel_crtc = to_intel_crtc(crtc);
10921
10922 state = drm_atomic_state_alloc(dev);
10923 restore_state = drm_atomic_state_alloc(dev);
10924 if (!state || !restore_state) {
10925 ret = -ENOMEM;
10926 goto fail;
10927 }
10928
10929 state->acquire_ctx = ctx;
10930 restore_state->acquire_ctx = ctx;
10931
10932 connector_state = drm_atomic_get_connector_state(state, connector);
10933 if (IS_ERR(connector_state)) {
10934 ret = PTR_ERR(connector_state);
10935 goto fail;
10936 }
10937
10938 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10939 if (ret)
10940 goto fail;
10941
10942 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10943 if (IS_ERR(crtc_state)) {
10944 ret = PTR_ERR(crtc_state);
10945 goto fail;
10946 }
10947
10948 crtc_state->base.active = crtc_state->base.enable = true;
10949
10950 if (!mode)
10951 mode = &load_detect_mode;
10952
10953 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10954 if (ret)
10955 goto fail;
10956
10957 ret = intel_modeset_disable_planes(state, crtc);
10958 if (ret)
10959 goto fail;
10960
10961 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10962 if (!ret)
10963 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10964 if (!ret)
10965 ret = drm_atomic_add_affected_planes(restore_state, crtc);
10966 if (ret) {
10967 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10968 goto fail;
10969 }
10970
10971 ret = drm_atomic_commit(state);
10972 if (ret) {
10973 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10974 goto fail;
10975 }
10976
10977 old->restore_state = restore_state;
10978 drm_atomic_state_put(state);
10979
10980 /* let the connector get through one full cycle before testing */
10981 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
10982 return true;
10983
10984 fail:
10985 if (state) {
10986 drm_atomic_state_put(state);
10987 state = NULL;
10988 }
10989 if (restore_state) {
10990 drm_atomic_state_put(restore_state);
10991 restore_state = NULL;
10992 }
10993
10994 if (ret == -EDEADLK)
10995 return ret;
10996
10997 return false;
10998 }
10999
11000 void intel_release_load_detect_pipe(struct drm_connector *connector,
11001 struct intel_load_detect_pipe *old,
11002 struct drm_modeset_acquire_ctx *ctx)
11003 {
11004 struct intel_encoder *intel_encoder =
11005 intel_attached_encoder(connector);
11006 struct drm_encoder *encoder = &intel_encoder->base;
11007 struct drm_atomic_state *state = old->restore_state;
11008 int ret;
11009
11010 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11011 connector->base.id, connector->name,
11012 encoder->base.id, encoder->name);
11013
11014 if (!state)
11015 return;
11016
11017 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11018 if (ret)
11019 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11020 drm_atomic_state_put(state);
11021 }
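/*
 * Typical pairing of the two helpers above (a hedged sketch modelled on
 * load-detect users such as the CRT connector; probe_connector() is
 * hypothetical and locking/error handling is elided):
 *
 *	struct intel_load_detect_pipe tmp;
 *	int ret;
 *
 *	ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
 *	if (ret > 0) {
 *		status = probe_connector(connector);
 *		intel_release_load_detect_pipe(connector, &tmp, ctx);
 *	} else if (ret == -EDEADLK) {
 *		return ret;		back off, caller retries the acquire
 *	}
 */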
11022
11023 static int i9xx_pll_refclk(struct drm_device *dev,
11024 const struct intel_crtc_state *pipe_config)
11025 {
11026 struct drm_i915_private *dev_priv = to_i915(dev);
11027 u32 dpll = pipe_config->dpll_hw_state.dpll;
11028
11029 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11030 return dev_priv->vbt.lvds_ssc_freq;
11031 else if (HAS_PCH_SPLIT(dev_priv))
11032 return 120000;
11033 else if (!IS_GEN(dev_priv, 2))
11034 return 96000;
11035 else
11036 return 48000;
11037 }
11038
11039 /* Returns the clock of the currently programmed mode of the given pipe. */
11040 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11041 struct intel_crtc_state *pipe_config)
11042 {
11043 struct drm_device *dev = crtc->base.dev;
11044 struct drm_i915_private *dev_priv = to_i915(dev);
11045 int pipe = pipe_config->cpu_transcoder;
11046 u32 dpll = pipe_config->dpll_hw_state.dpll;
11047 u32 fp;
11048 struct dpll clock;
11049 int port_clock;
11050 int refclk = i9xx_pll_refclk(dev, pipe_config);
11051
11052 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11053 fp = pipe_config->dpll_hw_state.fp0;
11054 else
11055 fp = pipe_config->dpll_hw_state.fp1;
11056
11057 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11058 if (IS_PINEVIEW(dev_priv)) {
11059 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11060 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11061 } else {
11062 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11063 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11064 }
11065
11066 if (!IS_GEN(dev_priv, 2)) {
11067 if (IS_PINEVIEW(dev_priv))
11068 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11069 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11070 else
11071 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11072 DPLL_FPA01_P1_POST_DIV_SHIFT);
11073
11074 switch (dpll & DPLL_MODE_MASK) {
11075 case DPLLB_MODE_DAC_SERIAL:
11076 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11077 5 : 10;
11078 break;
11079 case DPLLB_MODE_LVDS:
11080 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11081 7 : 14;
11082 break;
11083 default:
11084 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11085 "mode\n", (int)(dpll & DPLL_MODE_MASK));
11086 return;
11087 }
11088
11089 if (IS_PINEVIEW(dev_priv))
11090 port_clock = pnv_calc_dpll_params(refclk, &clock);
11091 else
11092 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11093 } else {
11094 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11095 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11096
11097 if (is_lvds) {
11098 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11099 DPLL_FPA01_P1_POST_DIV_SHIFT);
11100
11101 if (lvds & LVDS_CLKB_POWER_UP)
11102 clock.p2 = 7;
11103 else
11104 clock.p2 = 14;
11105 } else {
11106 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11107 clock.p1 = 2;
11108 else {
11109 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11110 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11111 }
11112 if (dpll & PLL_P2_DIVIDE_BY_4)
11113 clock.p2 = 4;
11114 else
11115 clock.p2 = 2;
11116 }
11117
11118 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11119 }
11120
11121 /*
11122 * This value includes pixel_multiplier. We will use
11123 * port_clock to compute adjusted_mode.crtc_clock in the
11124 * encoder's get_config() function.
11125 */
11126 pipe_config->port_clock = port_clock;
11127 }
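/*
 * Note on the ffs() decoding above: the P1 post divider is stored as a
 * one-hot bitfield, so e.g. (illustrative value) a field of 0b100 gives
 * ffs(0b100) = 3, i.e. p1 = 3. The Pineview N divider is decoded the
 * same way, with one subtracted from the result.
 */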
11128
11129 int intel_dotclock_calculate(int link_freq,
11130 const struct intel_link_m_n *m_n)
11131 {
11132 /*
11133 * The calculation for the data clock is:
11134 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11135 * But we want to avoid losing precision if possible, so:
11136 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11137 *
11138 * and with the link m/n the dot clock is simpler:
11139 * dot_clock = (link_m * link_clock) / link_n
11140 */
11141
11142 if (!m_n->link_n)
11143 return 0;
11144
11145 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11146 }
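/*
 * Worked example (illustrative numbers): with a DP HBR link at
 * link_freq = 270000 kHz and a programmed ratio link_m/link_n = 2/5,
 * intel_dotclock_calculate() returns 2 * 270000 / 5 = 108000 kHz.
 * Multiplying before dividing (mul_u32_u32() + div_u64()) is what
 * preserves precision for ratios that do not reduce this cleanly.
 */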
11147
11148 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11149 struct intel_crtc_state *pipe_config)
11150 {
11151 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11152
11153 /* read out port_clock from the DPLL */
11154 i9xx_crtc_clock_get(crtc, pipe_config);
11155
11156 /*
11157 * In case there is an active pipe without active ports,
11158 * we may need some idea for the dotclock anyway.
11159 * Calculate one based on the FDI configuration.
11160 */
11161 pipe_config->base.adjusted_mode.crtc_clock =
11162 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11163 &pipe_config->fdi_m_n);
11164 }
11165
11166 /* Returns the currently programmed mode of the given encoder. */
11167 struct drm_display_mode *
11168 intel_encoder_current_mode(struct intel_encoder *encoder)
11169 {
11170 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11171 struct intel_crtc_state *crtc_state;
11172 struct drm_display_mode *mode;
11173 struct intel_crtc *crtc;
11174 enum pipe pipe;
11175
11176 if (!encoder->get_hw_state(encoder, &pipe))
11177 return NULL;
11178
11179 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11180
11181 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11182 if (!mode)
11183 return NULL;
11184
11185 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11186 if (!crtc_state) {
11187 kfree(mode);
11188 return NULL;
11189 }
11190
11191 crtc_state->base.crtc = &crtc->base;
11192
11193 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11194 kfree(crtc_state);
11195 kfree(mode);
11196 return NULL;
11197 }
11198
11199 encoder->get_config(encoder, crtc_state);
11200
11201 intel_mode_from_pipe_config(mode, crtc_state);
11202
11203 kfree(crtc_state);
11204
11205 return mode;
11206 }
11207
11208 static void intel_crtc_destroy(struct drm_crtc *crtc)
11209 {
11210 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11211
11212 drm_crtc_cleanup(crtc);
11213 kfree(intel_crtc);
11214 }
11215
11216 /**
11217 * intel_wm_need_update - Check whether watermarks need updating
11218 * @cur: current plane state
11219 * @new: new plane state
11220 *
11221 * Check current plane state versus the new one to determine whether
11222 * watermarks need to be recalculated.
11223 *
11224 * Returns: %true if the watermarks need to be recalculated, %false otherwise.
11225 */
11226 static bool intel_wm_need_update(struct intel_plane_state *cur,
11227 struct intel_plane_state *new)
11228 {
11229 /* Update watermarks on tiling or size changes. */
11230 if (new->base.visible != cur->base.visible)
11231 return true;
11232
11233 if (!cur->base.fb || !new->base.fb)
11234 return false;
11235
11236 if (cur->base.fb->modifier != new->base.fb->modifier ||
11237 cur->base.rotation != new->base.rotation ||
11238 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11239 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11240 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11241 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11242 return true;
11243
11244 return false;
11245 }
11246
11247 static bool needs_scaling(const struct intel_plane_state *state)
11248 {
11249 int src_w = drm_rect_width(&state->base.src) >> 16;
11250 int src_h = drm_rect_height(&state->base.src) >> 16;
11251 int dst_w = drm_rect_width(&state->base.dst);
11252 int dst_h = drm_rect_height(&state->base.dst);
11253
11254 return (src_w != dst_w || src_h != dst_h);
11255 }
11256
11257 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11258 struct drm_crtc_state *crtc_state,
11259 const struct intel_plane_state *old_plane_state,
11260 struct drm_plane_state *plane_state)
11261 {
11262 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11263 struct drm_crtc *crtc = crtc_state->crtc;
11264 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11265 struct intel_plane *plane = to_intel_plane(plane_state->plane);
11266 struct drm_device *dev = crtc->dev;
11267 struct drm_i915_private *dev_priv = to_i915(dev);
11268 bool mode_changed = needs_modeset(crtc_state);
11269 bool was_crtc_enabled = old_crtc_state->base.active;
11270 bool is_crtc_enabled = crtc_state->active;
11271 bool turn_off, turn_on, visible, was_visible;
11272 struct drm_framebuffer *fb = plane_state->fb;
11273 int ret;
11274
11275 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11276 ret = skl_update_scaler_plane(
11277 to_intel_crtc_state(crtc_state),
11278 to_intel_plane_state(plane_state));
11279 if (ret)
11280 return ret;
11281 }
11282
11283 was_visible = old_plane_state->base.visible;
11284 visible = plane_state->visible;
11285
11286 if (!was_crtc_enabled && WARN_ON(was_visible))
11287 was_visible = false;
11288
11289 /*
11290 * Visibility is calculated as if the crtc was on, but
11291 * after scaler setup everything depends on it being off
11292 * when the crtc isn't active.
11293 *
11294 * FIXME this is wrong for watermarks. Watermarks should also
11295 * be computed as if the pipe would be active. Perhaps move
11296 * per-plane wm computation to the .check_plane() hook, and
11297 * only combine the results from all planes in the current place?
11298 */
11299 if (!is_crtc_enabled) {
11300 plane_state->visible = visible = false;
11301 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
11302 to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
11303 }
11304
11305 if (!was_visible && !visible)
11306 return 0;
11307
11308 if (fb != old_plane_state->base.fb)
11309 pipe_config->fb_changed = true;
11310
11311 turn_off = was_visible && (!visible || mode_changed);
11312 turn_on = visible && (!was_visible || mode_changed);
11313
11314 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11315 intel_crtc->base.base.id, intel_crtc->base.name,
11316 plane->base.base.id, plane->base.name,
11317 fb ? fb->base.id : -1);
11318
11319 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11320 plane->base.base.id, plane->base.name,
11321 was_visible, visible,
11322 turn_off, turn_on, mode_changed);
11323
11324 if (turn_on) {
11325 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11326 pipe_config->update_wm_pre = true;
11327
11328 /* must disable cxsr around plane enable/disable */
11329 if (plane->id != PLANE_CURSOR)
11330 pipe_config->disable_cxsr = true;
11331 } else if (turn_off) {
11332 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11333 pipe_config->update_wm_post = true;
11334
11335 /* must disable cxsr around plane enable/disable */
11336 if (plane->id != PLANE_CURSOR)
11337 pipe_config->disable_cxsr = true;
11338 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11339 to_intel_plane_state(plane_state))) {
11340 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11341 /* FIXME bollocks */
11342 pipe_config->update_wm_pre = true;
11343 pipe_config->update_wm_post = true;
11344 }
11345 }
11346
11347 if (visible || was_visible)
11348 pipe_config->fb_bits |= plane->frontbuffer_bit;
11349
11350 /*
11351 * ILK/SNB DVSACNTR/Sprite Enable
11352 * IVB SPR_CTL/Sprite Enable
11353 * "When in Self Refresh Big FIFO mode, a write to enable the
11354 * plane will be internally buffered and delayed while Big FIFO
11355 * mode is exiting."
11356 *
11357 * Which means that enabling the sprite can take an extra frame
11358 * when we start in big FIFO mode (LP1+). Thus we need to drop
11359 * down to LP0 and wait for vblank in order to make sure the
11360 * sprite gets enabled on the next vblank after the register write.
11361 * Doing otherwise would risk enabling the sprite one frame after
11362 * we've already signalled flip completion. We can resume LP1+
11363 * once the sprite has been enabled.
11364 *
11365 *
11366 * WaCxSRDisabledForSpriteScaling:ivb
11367 * IVB SPR_SCALE/Scaling Enable
11368 * "Low Power watermarks must be disabled for at least one
11369 * frame before enabling sprite scaling, and kept disabled
11370 * until sprite scaling is disabled."
11371 *
11372 * ILK/SNB DVSASCALE/Scaling Enable
11373 * "When in Self Refresh Big FIFO mode, scaling enable will be
11374 * masked off while Big FIFO mode is exiting."
11375 *
11376 * Despite the w/a only being listed for IVB we assume that
11377 * the ILK/SNB note has similar ramifications, hence we apply
11378 * the w/a on all three platforms.
11379 *
11380 * Experimental results suggest this is needed also for the
11381 * primary plane, not only the sprite plane.
11382 */
11383 if (plane->id != PLANE_CURSOR &&
11384 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11385 IS_IVYBRIDGE(dev_priv)) &&
11386 (turn_on || (!needs_scaling(old_plane_state) &&
11387 needs_scaling(to_intel_plane_state(plane_state)))))
11388 pipe_config->disable_lp_wm = true;
11389
11390 return 0;
11391 }
11392
11393 static bool encoders_cloneable(const struct intel_encoder *a,
11394 const struct intel_encoder *b)
11395 {
11396 /* masks could be asymmetric, so check both ways */
11397 return a == b || (a->cloneable & (1 << b->type) &&
11398 b->cloneable & (1 << a->type));
11399 }
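/*
 * Why both directions are checked (hypothetical mask values): if
 * a->cloneable contains BIT(b->type) but b->cloneable is 0, then a is
 * willing to clone with b while b refuses; testing only one direction
 * would wrongly allow that pair. Only a mutual match, or a == b,
 * passes.
 */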
11400
11401 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11402 struct intel_crtc *crtc,
11403 struct intel_encoder *encoder)
11404 {
11405 struct intel_encoder *source_encoder;
11406 struct drm_connector *connector;
11407 struct drm_connector_state *connector_state;
11408 int i;
11409
11410 for_each_new_connector_in_state(state, connector, connector_state, i) {
11411 if (connector_state->crtc != &crtc->base)
11412 continue;
11413
11414 source_encoder =
11415 to_intel_encoder(connector_state->best_encoder);
11416 if (!encoders_cloneable(encoder, source_encoder))
11417 return false;
11418 }
11419
11420 return true;
11421 }
11422
11423 static int icl_add_linked_planes(struct intel_atomic_state *state)
11424 {
11425 struct intel_plane *plane, *linked;
11426 struct intel_plane_state *plane_state, *linked_plane_state;
11427 int i;
11428
11429 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11430 linked = plane_state->linked_plane;
11431
11432 if (!linked)
11433 continue;
11434
11435 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11436 if (IS_ERR(linked_plane_state))
11437 return PTR_ERR(linked_plane_state);
11438
11439 WARN_ON(linked_plane_state->linked_plane != plane);
11440 WARN_ON(linked_plane_state->slave == plane_state->slave);
11441 }
11442
11443 return 0;
11444 }
11445
11446 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11447 {
11448 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11449 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11450 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11451 struct intel_plane *plane, *linked;
11452 struct intel_plane_state *plane_state;
11453 int i;
11454
11455 if (INTEL_GEN(dev_priv) < 11)
11456 return 0;
11457
11458 /*
11459 * Destroy all old plane links and make the slave plane invisible
11460 * in the crtc_state->active_planes mask.
11461 */
11462 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11463 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11464 continue;
11465
11466 plane_state->linked_plane = NULL;
11467 if (plane_state->slave && !plane_state->base.visible) {
11468 crtc_state->active_planes &= ~BIT(plane->id);
11469 crtc_state->update_planes |= BIT(plane->id);
11470 }
11471
11472 plane_state->slave = false;
11473 }
11474
11475 if (!crtc_state->nv12_planes)
11476 return 0;
11477
11478 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11479 struct intel_plane_state *linked_state = NULL;
11480
11481 if (plane->pipe != crtc->pipe ||
11482 !(crtc_state->nv12_planes & BIT(plane->id)))
11483 continue;
11484
11485 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11486 if (!icl_is_nv12_y_plane(linked->id))
11487 continue;
11488
11489 if (crtc_state->active_planes & BIT(linked->id))
11490 continue;
11491
11492 linked_state = intel_atomic_get_plane_state(state, linked);
11493 if (IS_ERR(linked_state))
11494 return PTR_ERR(linked_state);
11495
11496 break;
11497 }
11498
11499 if (!linked_state) {
11500 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11501 hweight8(crtc_state->nv12_planes));
11502
11503 return -EINVAL;
11504 }
11505
11506 plane_state->linked_plane = linked;
11507
11508 linked_state->slave = true;
11509 linked_state->linked_plane = plane;
11510 crtc_state->active_planes |= BIT(linked->id);
11511 crtc_state->update_planes |= BIT(linked->id);
11512 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11513 }
11514
11515 return 0;
11516 }
11517
11518 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11519 {
11520 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11521 struct intel_atomic_state *state =
11522 to_intel_atomic_state(new_crtc_state->base.state);
11523 const struct intel_crtc_state *old_crtc_state =
11524 intel_atomic_get_old_crtc_state(state, crtc);
11525
11526 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11527 }
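/*
 * Note on the "!old != !new" idiom above: the two logical negations
 * collapse each bitmask to 0 or 1, so the check fires only when the set
 * of C8 planes goes from empty to non-empty or back. For example
 * (illustrative masks) 0x0 -> 0x3 reports a change, while 0x1 -> 0x2
 * does not.
 */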
11528
11529 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11530 struct drm_crtc_state *crtc_state)
11531 {
11532 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11533 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11534 struct intel_crtc_state *pipe_config =
11535 to_intel_crtc_state(crtc_state);
11536 int ret;
11537 bool mode_changed = needs_modeset(crtc_state);
11538
11539 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11540 mode_changed && !crtc_state->active)
11541 pipe_config->update_wm_post = true;
11542
11543 if (mode_changed && crtc_state->enable &&
11544 dev_priv->display.crtc_compute_clock &&
11545 !WARN_ON(pipe_config->shared_dpll)) {
11546 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11547 pipe_config);
11548 if (ret)
11549 return ret;
11550 }
11551
11552 /*
11553 * May need to update pipe gamma enable bits
11554 * when C8 planes are getting enabled/disabled.
11555 */
11556 if (c8_planes_changed(pipe_config))
11557 crtc_state->color_mgmt_changed = true;
11558
11559 if (mode_changed || pipe_config->update_pipe ||
11560 crtc_state->color_mgmt_changed) {
11561 ret = intel_color_check(pipe_config);
11562 if (ret)
11563 return ret;
11564 }
11565
11566 ret = 0;
11567 if (dev_priv->display.compute_pipe_wm) {
11568 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11569 if (ret) {
11570 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11571 return ret;
11572 }
11573 }
11574
11575 if (dev_priv->display.compute_intermediate_wm) {
11576 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11577 return 0;
11578
11579 /*
11580 * Calculate 'intermediate' watermarks that satisfy both the
11581 * old state and the new state. We can program these
11582 * immediately.
11583 */
11584 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11585 if (ret) {
11586 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11587 return ret;
11588 }
11589 }
11590
11591 if (INTEL_GEN(dev_priv) >= 9) {
11592 if (mode_changed || pipe_config->update_pipe)
11593 ret = skl_update_scaler_crtc(pipe_config);
11594
11595 if (!ret)
11596 ret = icl_check_nv12_planes(pipe_config);
11597 if (!ret)
11598 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11599 pipe_config);
11600 if (!ret)
11601 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11602 pipe_config);
11603 }
11604
11605 if (HAS_IPS(dev_priv))
11606 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11607
11608 return ret;
11609 }
11610
11611 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11612 .atomic_check = intel_crtc_atomic_check,
11613 };
11614
11615 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11616 {
11617 struct intel_connector *connector;
11618 struct drm_connector_list_iter conn_iter;
11619
11620 drm_connector_list_iter_begin(dev, &conn_iter);
11621 for_each_intel_connector_iter(connector, &conn_iter) {
11622 if (connector->base.state->crtc)
11623 drm_connector_put(&connector->base);
11624
11625 if (connector->base.encoder) {
11626 connector->base.state->best_encoder =
11627 connector->base.encoder;
11628 connector->base.state->crtc =
11629 connector->base.encoder->crtc;
11630
11631 drm_connector_get(&connector->base);
11632 } else {
11633 connector->base.state->best_encoder = NULL;
11634 connector->base.state->crtc = NULL;
11635 }
11636 }
11637 drm_connector_list_iter_end(&conn_iter);
11638 }
11639
11640 static int
11641 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11642 struct intel_crtc_state *pipe_config)
11643 {
11644 struct drm_connector *connector = conn_state->connector;
11645 const struct drm_display_info *info = &connector->display_info;
11646 int bpp;
11647
11648 switch (conn_state->max_bpc) {
11649 case 6 ... 7:
11650 bpp = 6 * 3;
11651 break;
11652 case 8 ... 9:
11653 bpp = 8 * 3;
11654 break;
11655 case 10 ... 11:
11656 bpp = 10 * 3;
11657 break;
11658 case 12:
11659 bpp = 12 * 3;
11660 break;
11661 default:
11662 return -EINVAL;
11663 }
11664
11665 if (bpp < pipe_config->pipe_bpp) {
11666 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11667 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11668 connector->base.id, connector->name,
11669 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11670 pipe_config->pipe_bpp);
11671
11672 pipe_config->pipe_bpp = bpp;
11673 }
11674
11675 return 0;
11676 }
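/*
 * Worked example (illustrative values): a connector with max_bpc == 10
 * maps to bpp = 10 * 3 = 30. If the platform baseline pipe_bpp is 36
 * (12 bpc), the pipe is clamped down to 30; if it is already 24, the
 * lower value is kept, since only bpp < pipe_bpp triggers the clamp.
 */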
11677
11678 static int
11679 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11680 struct intel_crtc_state *pipe_config)
11681 {
11682 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11683 struct drm_atomic_state *state = pipe_config->base.state;
11684 struct drm_connector *connector;
11685 struct drm_connector_state *connector_state;
11686 int bpp, i;
11687
11688 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11689 IS_CHERRYVIEW(dev_priv)))
11690 bpp = 10*3;
11691 else if (INTEL_GEN(dev_priv) >= 5)
11692 bpp = 12*3;
11693 else
11694 bpp = 8*3;
11695
11696 pipe_config->pipe_bpp = bpp;
11697
11698 /* Clamp display bpp to connector max bpp */
11699 for_each_new_connector_in_state(state, connector, connector_state, i) {
11700 int ret;
11701
11702 if (connector_state->crtc != &crtc->base)
11703 continue;
11704
11705 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11706 if (ret)
11707 return ret;
11708 }
11709
11710 return 0;
11711 }
11712
11713 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11714 {
11715 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11716 "type: 0x%x flags: 0x%x\n",
11717 mode->crtc_clock,
11718 mode->crtc_hdisplay, mode->crtc_hsync_start,
11719 mode->crtc_hsync_end, mode->crtc_htotal,
11720 mode->crtc_vdisplay, mode->crtc_vsync_start,
11721 mode->crtc_vsync_end, mode->crtc_vtotal,
11722 mode->type, mode->flags);
11723 }
11724
11725 static inline void
11726 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
11727 const char *id, unsigned int lane_count,
11728 const struct intel_link_m_n *m_n)
11729 {
11730 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11731 id, lane_count,
11732 m_n->gmch_m, m_n->gmch_n,
11733 m_n->link_m, m_n->link_n, m_n->tu);
11734 }
11735
11736 static void
11737 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11738 const union hdmi_infoframe *frame)
11739 {
11740 if ((drm_debug & DRM_UT_KMS) == 0)
11741 return;
11742
11743 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11744 }
11745
11746 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11747
11748 static const char * const output_type_str[] = {
11749 OUTPUT_TYPE(UNUSED),
11750 OUTPUT_TYPE(ANALOG),
11751 OUTPUT_TYPE(DVO),
11752 OUTPUT_TYPE(SDVO),
11753 OUTPUT_TYPE(LVDS),
11754 OUTPUT_TYPE(TVOUT),
11755 OUTPUT_TYPE(HDMI),
11756 OUTPUT_TYPE(DP),
11757 OUTPUT_TYPE(EDP),
11758 OUTPUT_TYPE(DSI),
11759 OUTPUT_TYPE(DDI),
11760 OUTPUT_TYPE(DP_MST),
11761 };
11762
11763 #undef OUTPUT_TYPE
11764
11765 static void snprintf_output_types(char *buf, size_t len,
11766 unsigned int output_types)
11767 {
11768 char *str = buf;
11769 int i;
11770
11771 str[0] = '\0';
11772
11773 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11774 int r;
11775
11776 if ((output_types & BIT(i)) == 0)
11777 continue;
11778
11779 r = snprintf(str, len, "%s%s",
11780 str != buf ? "," : "", output_type_str[i]);
11781 if (r >= len)
11782 break;
11783 str += r;
11784 len -= r;
11785
11786 output_types &= ~BIT(i);
11787 }
11788
11789 WARN_ON_ONCE(output_types != 0);
11790 }
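/*
 * Example output (illustrative input): for output_types ==
 * BIT(INTEL_OUTPUT_HDMI) | BIT(INTEL_OUTPUT_DP) the buffer receives
 * "HDMI,DP" - entries in bit order, comma separated, truncated once
 * 'len' runs out.
 */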
11791
11792 static const char * const output_format_str[] = {
11793 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11794 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11795 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11796 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11797 };
11798
11799 static const char *output_formats(enum intel_output_format format)
11800 {
11801 if (format >= ARRAY_SIZE(output_format_str))
11802 format = INTEL_OUTPUT_FORMAT_INVALID;
11803 return output_format_str[format];
11804 }
11805
11806 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
11807 {
11808 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11809 const struct drm_framebuffer *fb = plane_state->base.fb;
11810 struct drm_format_name_buf format_name;
11811
11812 if (!fb) {
11813 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
11814 plane->base.base.id, plane->base.name,
11815 yesno(plane_state->base.visible));
11816 return;
11817 }
11818
11819 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
11820 plane->base.base.id, plane->base.name,
11821 fb->base.id, fb->width, fb->height,
11822 drm_get_format_name(fb->format->format, &format_name),
11823 yesno(plane_state->base.visible));
11824 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
11825 plane_state->base.rotation, plane_state->scaler_id);
11826 if (plane_state->base.visible)
11827 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
11828 DRM_RECT_FP_ARG(&plane_state->base.src),
11829 DRM_RECT_ARG(&plane_state->base.dst));
11830 }
11831
11832 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
11833 struct intel_atomic_state *state,
11834 const char *context)
11835 {
11836 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
11837 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11838 const struct intel_plane_state *plane_state;
11839 struct intel_plane *plane;
11840 char buf[64];
11841 int i;
11842
11843 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
11844 crtc->base.base.id, crtc->base.name,
11845 yesno(pipe_config->base.enable), context);
11846
11847 if (!pipe_config->base.enable)
11848 goto dump_planes;
11849
11850 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11851 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
11852 yesno(pipe_config->base.active),
11853 buf, pipe_config->output_types,
11854 output_formats(pipe_config->output_format));
11855
11856 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11857 transcoder_name(pipe_config->cpu_transcoder),
11858 pipe_config->pipe_bpp, pipe_config->dither);
11859
11860 if (pipe_config->has_pch_encoder)
11861 intel_dump_m_n_config(pipe_config, "fdi",
11862 pipe_config->fdi_lanes,
11863 &pipe_config->fdi_m_n);
11864
11865 if (intel_crtc_has_dp_encoder(pipe_config)) {
11866 intel_dump_m_n_config(pipe_config, "dp m_n",
11867 pipe_config->lane_count, &pipe_config->dp_m_n);
11868 if (pipe_config->has_drrs)
11869 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11870 pipe_config->lane_count,
11871 &pipe_config->dp_m2_n2);
11872 }
11873
11874 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
11875 pipe_config->has_audio, pipe_config->has_infoframe,
11876 pipe_config->infoframes.enable);
11877
11878 if (pipe_config->infoframes.enable &
11879 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11880 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11881 if (pipe_config->infoframes.enable &
11882 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11883 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11884 if (pipe_config->infoframes.enable &
11885 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11886 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11887 if (pipe_config->infoframes.enable &
11888 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11889 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11890
11891 DRM_DEBUG_KMS("requested mode:\n");
11892 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11893 DRM_DEBUG_KMS("adjusted mode:\n");
11894 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11895 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11896 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11897 pipe_config->port_clock,
11898 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11899 pipe_config->pixel_rate);
11900
11901 if (INTEL_GEN(dev_priv) >= 9)
11902 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11903 crtc->num_scalers,
11904 pipe_config->scaler_state.scaler_users,
11905 pipe_config->scaler_state.scaler_id);
11906
11907 if (HAS_GMCH(dev_priv))
11908 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11909 pipe_config->gmch_pfit.control,
11910 pipe_config->gmch_pfit.pgm_ratios,
11911 pipe_config->gmch_pfit.lvds_border_bits);
11912 else
11913 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
11914 pipe_config->pch_pfit.pos,
11915 pipe_config->pch_pfit.size,
11916 enableddisabled(pipe_config->pch_pfit.enabled),
11917 yesno(pipe_config->pch_pfit.force_thru));
11918
11919 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11920 pipe_config->ips_enabled, pipe_config->double_wide);
11921
11922 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11923
11924 dump_planes:
11925 if (!state)
11926 return;
11927
11928 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11929 if (plane->pipe == crtc->pipe)
11930 intel_dump_plane_state(plane_state);
11931 }
11932 }
11933
11934 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
11935 {
11936 struct drm_device *dev = state->base.dev;
11937 struct drm_connector *connector;
11938 struct drm_connector_list_iter conn_iter;
11939 unsigned int used_ports = 0;
11940 unsigned int used_mst_ports = 0;
11941 bool ret = true;
11942
11943 /*
11944 * Walk the connector list instead of the encoder
11945 * list to detect the problem on ddi platforms
11946 * where there's just one encoder per digital port.
11947 */
11948 drm_connector_list_iter_begin(dev, &conn_iter);
11949 drm_for_each_connector_iter(connector, &conn_iter) {
11950 struct drm_connector_state *connector_state;
11951 struct intel_encoder *encoder;
11952
11953 connector_state =
11954 drm_atomic_get_new_connector_state(&state->base,
11955 connector);
11956 if (!connector_state)
11957 connector_state = connector->state;
11958
11959 if (!connector_state->best_encoder)
11960 continue;
11961
11962 encoder = to_intel_encoder(connector_state->best_encoder);
11963
11964 WARN_ON(!connector_state->crtc);
11965
11966 switch (encoder->type) {
11967 unsigned int port_mask;
11968 case INTEL_OUTPUT_DDI:
11969 if (WARN_ON(!HAS_DDI(to_i915(dev))))
11970 break;
11971 /* else: fall through */
11972 case INTEL_OUTPUT_DP:
11973 case INTEL_OUTPUT_HDMI:
11974 case INTEL_OUTPUT_EDP:
11975 port_mask = 1 << encoder->port;
11976
11977 /* the same port mustn't appear more than once */
11978 if (used_ports & port_mask)
11979 ret = false;
11980
11981 used_ports |= port_mask;
11982 break;
11983 case INTEL_OUTPUT_DP_MST:
11984 used_mst_ports |=
11985 1 << encoder->port;
11986 break;
11987 default:
11988 break;
11989 }
11990 }
11991 drm_connector_list_iter_end(&conn_iter);
11992
11993 /* can't mix MST and SST/HDMI on the same port */
11994 if (used_ports & used_mst_ports)
11995 return false;
11996
11997 return ret;
11998 }
11999
12000 static int
12001 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12002 {
12003 struct drm_i915_private *dev_priv =
12004 to_i915(crtc_state->base.crtc->dev);
12005 struct intel_crtc_state *saved_state;
12006
12007 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12008 if (!saved_state)
12009 return -ENOMEM;
12010
12011 /* FIXME: before the switch to atomic started, a new pipe_config was
12012 * kzalloc'd. Code that depends on any field being zero should be
12013 * fixed, so that the crtc_state can be safely duplicated. For now,
12014 * only fields that are known not to cause problems are preserved. */
12015
12016 saved_state->scaler_state = crtc_state->scaler_state;
12017 saved_state->shared_dpll = crtc_state->shared_dpll;
12018 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12019 saved_state->crc_enabled = crtc_state->crc_enabled;
12020 if (IS_G4X(dev_priv) ||
12021 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12022 saved_state->wm = crtc_state->wm;
12023
12024 /* Keep base drm_crtc_state intact, only clear our extended struct */
12025 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
12026 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
12027 sizeof(*crtc_state) - sizeof(crtc_state->base));
12028
12029 kfree(saved_state);
12030 return 0;
12031 }
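/*
 * The memcpy above is the "clear everything after the embedded base"
 * pattern. A self-contained sketch of the same idea with hypothetical
 * types (not driver code):
 *
 *	struct outer {
 *		struct base_state base;		first member, by contract
 *		int ext_a, ext_b;		driver-private extension
 *	};
 *
 *	memset(&s->base + 1, 0, sizeof(*s) - sizeof(s->base));
 *
 * Here a copy from saved_state replaces the memset so that a handful of
 * fields survive with their saved values instead of being zeroed, and
 * the BUILD_BUG_ON() guarantees 'base' really is the first member.
 */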
12032
12033 static int
12034 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12035 {
12036 struct drm_crtc *crtc = pipe_config->base.crtc;
12037 struct drm_atomic_state *state = pipe_config->base.state;
12038 struct intel_encoder *encoder;
12039 struct drm_connector *connector;
12040 struct drm_connector_state *connector_state;
12041 int base_bpp, ret;
12042 int i;
12043 bool retry = true;
12044
12045 ret = clear_intel_crtc_state(pipe_config);
12046 if (ret)
12047 return ret;
12048
12049 pipe_config->cpu_transcoder =
12050 (enum transcoder) to_intel_crtc(crtc)->pipe;
12051
12052 /*
12053 * Sanitize sync polarity flags based on requested ones. If neither
12054 * positive nor negative polarity is requested, treat this as meaning
12055 * negative polarity.
12056 */
12057 if (!(pipe_config->base.adjusted_mode.flags &
12058 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12059 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12060
12061 if (!(pipe_config->base.adjusted_mode.flags &
12062 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12063 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12064
12065 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12066 pipe_config);
12067 if (ret)
12068 return ret;
12069
12070 base_bpp = pipe_config->pipe_bpp;
12071
12072 /*
12073 * Determine the real pipe dimensions. Note that stereo modes can
12074 * increase the actual pipe size due to the frame doubling and
12075 * insertion of additional space for blanks between the frames. This
12076 * is stored in the crtc timings. We use the requested mode to do this
12077 * computation to clearly distinguish it from the adjusted mode, which
12078 * can be changed by the connectors in the below retry loop.
12079 */
12080 drm_mode_get_hv_timing(&pipe_config->base.mode,
12081 &pipe_config->pipe_src_w,
12082 &pipe_config->pipe_src_h);
12083
12084 for_each_new_connector_in_state(state, connector, connector_state, i) {
12085 if (connector_state->crtc != crtc)
12086 continue;
12087
12088 encoder = to_intel_encoder(connector_state->best_encoder);
12089
12090 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12091 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12092 return -EINVAL;
12093 }
12094
12095 /*
12096 * Determine output_types before calling the .compute_config()
12097 * hooks so that the hooks can use this information safely.
12098 */
12099 if (encoder->compute_output_type)
12100 pipe_config->output_types |=
12101 BIT(encoder->compute_output_type(encoder, pipe_config,
12102 connector_state));
12103 else
12104 pipe_config->output_types |= BIT(encoder->type);
12105 }
12106
12107 encoder_retry:
12108 /* Ensure the port clock defaults are reset when retrying. */
12109 pipe_config->port_clock = 0;
12110 pipe_config->pixel_multiplier = 1;
12111
12112 /* Fill in default crtc timings, allow encoders to overwrite them. */
12113 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12114 CRTC_STEREO_DOUBLE);
12115
12116 /* Pass our mode to the connectors and the CRTC to give them a chance to
12117 * adjust it according to limitations or connector properties, and also
12118 * a chance to reject the mode entirely.
12119 */
12120 for_each_new_connector_in_state(state, connector, connector_state, i) {
12121 if (connector_state->crtc != crtc)
12122 continue;
12123
12124 encoder = to_intel_encoder(connector_state->best_encoder);
12125 ret = encoder->compute_config(encoder, pipe_config,
12126 connector_state);
12127 if (ret < 0) {
12128 if (ret != -EDEADLK)
12129 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12130 ret);
12131 return ret;
12132 }
12133 }
12134
12135 /* Set default port clock if not overwritten by the encoder. Needs to be
12136 * done afterwards in case the encoder adjusts the mode. */
12137 if (!pipe_config->port_clock)
12138 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12139 * pipe_config->pixel_multiplier;
12140
12141 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12142 if (ret == -EDEADLK)
12143 return ret;
12144 if (ret < 0) {
12145 DRM_DEBUG_KMS("CRTC fixup failed\n");
12146 return ret;
12147 }
12148
12149 if (ret == RETRY) {
12150 if (WARN(!retry, "loop in pipe configuration computation\n"))
12151 return -EINVAL;
12152
12153 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12154 retry = false;
12155 goto encoder_retry;
12156 }
12157
12158 /* Dithering seems to not pass through bits correctly when it should, so
12159 * only enable it on 6bpc panels and when it's not a compliance
12160 * test requesting a 6bpc video pattern.
12161 */
12162 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12163 !pipe_config->dither_force_disable;
12164 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12165 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12166
12167 return 0;
12168 }
12169
12170 bool intel_fuzzy_clock_check(int clock1, int clock2)
12171 {
12172 int diff;
12173
12174 if (clock1 == clock2)
12175 return true;
12176
12177 if (!clock1 || !clock2)
12178 return false;
12179
12180 diff = abs(clock1 - clock2);
12181
12182 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12183 return true;
12184
12185 return false;
12186 }
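/*
 * Worked example (illustrative numbers): the test passes when diff is
 * below 5% of (clock1 + clock2). For 100000 vs 104000, diff = 4000 and
 * (4000 + 204000) * 100 / 204000 = 101 < 105, so they compare equal.
 * For 100000 vs 112000, (12000 + 212000) * 100 / 212000 = 105, which is
 * not < 105, so the clocks are treated as different.
 */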
12187
12188 static bool
12189 intel_compare_m_n(unsigned int m, unsigned int n,
12190 unsigned int m2, unsigned int n2,
12191 bool exact)
12192 {
12193 if (m == m2 && n == n2)
12194 return true;
12195
12196 if (exact || !m || !n || !m2 || !n2)
12197 return false;
12198
12199 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12200
12201 if (n > n2) {
12202 while (n > n2) {
12203 m2 <<= 1;
12204 n2 <<= 1;
12205 }
12206 } else if (n < n2) {
12207 while (n < n2) {
12208 m <<= 1;
12209 n <<= 1;
12210 }
12211 }
12212
12213 if (n != n2)
12214 return false;
12215
12216 return intel_fuzzy_clock_check(m, m2);
12217 }
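/*
 * Worked example (illustrative numbers): comparing m/n = 100/2000 with
 * m2/n2 = 49/1000 in non-exact mode. Since n > n2, m2 and n2 are
 * doubled to 98/2000; the denominators now match and the fuzzy clock
 * check on 100 vs 98 passes, so the ratios count as equivalent. If the
 * denominators can never meet exactly (e.g. 1000 vs 3000), the compare
 * fails.
 */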
12218
12219 static bool
12220 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12221 struct intel_link_m_n *m2_n2,
12222 bool adjust)
12223 {
12224 if (m_n->tu == m2_n2->tu &&
12225 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12226 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12227 intel_compare_m_n(m_n->link_m, m_n->link_n,
12228 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12229 if (adjust)
12230 *m2_n2 = *m_n;
12231
12232 return true;
12233 }
12234
12235 return false;
12236 }
12237
12238 static bool
12239 intel_compare_infoframe(const union hdmi_infoframe *a,
12240 const union hdmi_infoframe *b)
12241 {
12242 return memcmp(a, b, sizeof(*a)) == 0;
12243 }
12244
12245 static void
12246 pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
12247 bool adjust, const char *name,
12248 const union hdmi_infoframe *a,
12249 const union hdmi_infoframe *b)
12250 {
12251 if (adjust) {
12252 if ((drm_debug & DRM_UT_KMS) == 0)
12253 return;
12254
12255 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
12256 drm_dbg(DRM_UT_KMS, "expected:");
12257 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12258 drm_dbg(DRM_UT_KMS, "found:");
12259 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12260 } else {
12261 drm_err("mismatch in %s infoframe", name);
12262 drm_err("expected:");
12263 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12264 drm_err("found:");
12265 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12266 }
12267 }
12268
12269 static void __printf(3, 4)
12270 pipe_config_err(bool adjust, const char *name, const char *format, ...)
12271 {
12272 struct va_format vaf;
12273 va_list args;
12274
12275 va_start(args, format);
12276 vaf.fmt = format;
12277 vaf.va = &args;
12278
12279 if (adjust)
12280 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12281 else
12282 drm_err("mismatch in %s %pV", name, &vaf);
12283
12284 va_end(args);
12285 }
12286
12287 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12288 {
12289 if (i915_modparams.fastboot != -1)
12290 return i915_modparams.fastboot;
12291
12292 /* Enable fastboot by default on Skylake and newer */
12293 if (INTEL_GEN(dev_priv) >= 9)
12294 return true;
12295
12296 /* Enable fastboot by default on VLV and CHV */
12297 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12298 return true;
12299
12300 /* Disabled by default on all others */
12301 return false;
12302 }
12303
12304 static bool
12305 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
12306 struct intel_crtc_state *current_config,
12307 struct intel_crtc_state *pipe_config,
12308 bool adjust)
12309 {
12310 bool ret = true;
12311 bool fixup_inherited = adjust &&
12312 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12313 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12314
12315 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12316 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12317 ret = false;
12318 }
12319
12320 #define PIPE_CONF_CHECK_X(name) do { \
12321 if (current_config->name != pipe_config->name) { \
12322 pipe_config_err(adjust, __stringify(name), \
12323 "(expected 0x%08x, found 0x%08x)\n", \
12324 current_config->name, \
12325 pipe_config->name); \
12326 ret = false; \
12327 } \
12328 } while (0)
12329
12330 #define PIPE_CONF_CHECK_I(name) do { \
12331 if (current_config->name != pipe_config->name) { \
12332 pipe_config_err(adjust, __stringify(name), \
12333 "(expected %i, found %i)\n", \
12334 current_config->name, \
12335 pipe_config->name); \
12336 ret = false; \
12337 } \
12338 } while (0)
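/*
 * Expansion sketch: PIPE_CONF_CHECK_I(lane_count) becomes, roughly
 * (the do/while (0) wrapper elided),
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		pipe_config_err(adjust, "lane_count",
 *				"(expected %i, found %i)\n",
 *				current_config->lane_count,
 *				pipe_config->lane_count);
 *		ret = false;
 *	}
 *
 * so each checked field logs its own name via __stringify() and marks
 * the compare as failed without aborting the remaining checks.
 */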
12339
12340 #define PIPE_CONF_CHECK_BOOL(name) do { \
12341 if (current_config->name != pipe_config->name) { \
12342 pipe_config_err(adjust, __stringify(name), \
12343 "(expected %s, found %s)\n", \
12344 yesno(current_config->name), \
12345 yesno(pipe_config->name)); \
12346 ret = false; \
12347 } \
12348 } while (0)
12349
12350 /*
12351 * Checks state where we only read out the enabling, but not the entire
12352 * state itself (like full infoframes or ELD for audio). These states
12353 * require a full modeset on bootup to fix up.
12354 */
12355 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12356 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12357 PIPE_CONF_CHECK_BOOL(name); \
12358 } else { \
12359 pipe_config_err(adjust, __stringify(name), \
12360 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12361 yesno(current_config->name), \
12362 yesno(pipe_config->name)); \
12363 ret = false; \
12364 } \
12365 } while (0)
12366
12367 #define PIPE_CONF_CHECK_P(name) do { \
12368 if (current_config->name != pipe_config->name) { \
12369 pipe_config_err(adjust, __stringify(name), \
12370 "(expected %p, found %p)\n", \
12371 current_config->name, \
12372 pipe_config->name); \
12373 ret = false; \
12374 } \
12375 } while (0)
12376
12377 #define PIPE_CONF_CHECK_M_N(name) do { \
12378 if (!intel_compare_link_m_n(&current_config->name, \
12379 &pipe_config->name,\
12380 adjust)) { \
12381 pipe_config_err(adjust, __stringify(name), \
12382 "(expected tu %i gmch %i/%i link %i/%i, " \
12383 "found tu %i, gmch %i/%i link %i/%i)\n", \
12384 current_config->name.tu, \
12385 current_config->name.gmch_m, \
12386 current_config->name.gmch_n, \
12387 current_config->name.link_m, \
12388 current_config->name.link_n, \
12389 pipe_config->name.tu, \
12390 pipe_config->name.gmch_m, \
12391 pipe_config->name.gmch_n, \
12392 pipe_config->name.link_m, \
12393 pipe_config->name.link_n); \
12394 ret = false; \
12395 } \
12396 } while (0)
12397
12398 /* This is required for BDW+ where there is only one set of registers for
12399 * switching between high and low RR.
12400 * This macro can be used whenever a comparison has to be made between one
12401 * hw state and multiple sw state variables.
12402 */
12403 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12404 if (!intel_compare_link_m_n(&current_config->name, \
12405 &pipe_config->name, adjust) && \
12406 !intel_compare_link_m_n(&current_config->alt_name, \
12407 &pipe_config->name, adjust)) { \
12408 pipe_config_err(adjust, __stringify(name), \
12409 "(expected tu %i gmch %i/%i link %i/%i, " \
12410 "or tu %i gmch %i/%i link %i/%i, " \
12411 "found tu %i, gmch %i/%i link %i/%i)\n", \
12412 current_config->name.tu, \
12413 current_config->name.gmch_m, \
12414 current_config->name.gmch_n, \
12415 current_config->name.link_m, \
12416 current_config->name.link_n, \
12417 current_config->alt_name.tu, \
12418 current_config->alt_name.gmch_m, \
12419 current_config->alt_name.gmch_n, \
12420 current_config->alt_name.link_m, \
12421 current_config->alt_name.link_n, \
12422 pipe_config->name.tu, \
12423 pipe_config->name.gmch_m, \
12424 pipe_config->name.gmch_n, \
12425 pipe_config->name.link_m, \
12426 pipe_config->name.link_n); \
12427 ret = false; \
12428 } \
12429 } while (0)
12430
12431 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12432 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12433 pipe_config_err(adjust, __stringify(name), \
12434 "(%x) (expected %i, found %i)\n", \
12435 (mask), \
12436 current_config->name & (mask), \
12437 pipe_config->name & (mask)); \
12438 ret = false; \
12439 } \
12440 } while (0)
12441
12442 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12443 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12444 pipe_config_err(adjust, __stringify(name), \
12445 "(expected %i, found %i)\n", \
12446 current_config->name, \
12447 pipe_config->name); \
12448 ret = false; \
12449 } \
12450 } while (0)
12451
12452 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12453 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12454 &pipe_config->infoframes.name)) { \
12455 pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
12456 &current_config->infoframes.name, \
12457 &pipe_config->infoframes.name); \
12458 ret = false; \
12459 } \
12460 } while (0)
12461
12462 #define PIPE_CONF_QUIRK(quirk) \
12463 ((current_config->quirks | pipe_config->quirks) & (quirk))
12464
12465 PIPE_CONF_CHECK_I(cpu_transcoder);
12466
12467 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12468 PIPE_CONF_CHECK_I(fdi_lanes);
12469 PIPE_CONF_CHECK_M_N(fdi_m_n);
12470
12471 PIPE_CONF_CHECK_I(lane_count);
12472 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12473
12474 if (INTEL_GEN(dev_priv) < 8) {
12475 PIPE_CONF_CHECK_M_N(dp_m_n);
12476
12477 if (current_config->has_drrs)
12478 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12479 } else
12480 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12481
12482 PIPE_CONF_CHECK_X(output_types);
12483
12484 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12485 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12486 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12487 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12488 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12489 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12490
12491 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12492 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12493 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12494 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12495 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12496 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12497
12498 PIPE_CONF_CHECK_I(pixel_multiplier);
12499 PIPE_CONF_CHECK_I(output_format);
12500 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12501 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12502 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12503 PIPE_CONF_CHECK_BOOL(limited_color_range);
12504
12505 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12506 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12507 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12508
12509 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12510
12511 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12512 DRM_MODE_FLAG_INTERLACE);
12513
12514 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12515 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12516 DRM_MODE_FLAG_PHSYNC);
12517 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12518 DRM_MODE_FLAG_NHSYNC);
12519 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12520 DRM_MODE_FLAG_PVSYNC);
12521 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12522 DRM_MODE_FLAG_NVSYNC);
12523 }
12524
12525 PIPE_CONF_CHECK_X(gmch_pfit.control);
12526 /* pfit ratios are autocomputed by the hw on gen4+ */
12527 if (INTEL_GEN(dev_priv) < 4)
12528 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12529 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12530
12531 /*
12532 * Changing the EDP transcoder input mux
12533 * (A_ONOFF vs. A_ON) requires a full modeset.
12534 */
12535 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
12536
12537 if (!adjust) {
12538 PIPE_CONF_CHECK_I(pipe_src_w);
12539 PIPE_CONF_CHECK_I(pipe_src_h);
12540
12541 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12542 if (current_config->pch_pfit.enabled) {
12543 PIPE_CONF_CHECK_X(pch_pfit.pos);
12544 PIPE_CONF_CHECK_X(pch_pfit.size);
12545 }
12546
12547 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12548 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12549
12550 PIPE_CONF_CHECK_X(gamma_mode);
12551 if (IS_CHERRYVIEW(dev_priv))
12552 PIPE_CONF_CHECK_X(cgm_mode);
12553 else
12554 PIPE_CONF_CHECK_X(csc_mode);
12555 PIPE_CONF_CHECK_BOOL(gamma_enable);
12556 PIPE_CONF_CHECK_BOOL(csc_enable);
12557 }
12558
12559 PIPE_CONF_CHECK_BOOL(double_wide);
12560
12561 PIPE_CONF_CHECK_P(shared_dpll);
12562 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12563 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12564 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12565 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12566 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12567 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12568 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12569 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12570 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12571 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12572 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12573 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12574 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12575 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12576 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12577 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12578 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12579 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12580 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12581 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12582 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12583 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12584 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12585 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12586 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12587 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12588 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12589 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12590 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12591 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12592 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12593
12594 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12595 PIPE_CONF_CHECK_X(dsi_pll.div);
12596
12597 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12598 PIPE_CONF_CHECK_I(pipe_bpp);
12599
12600 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12601 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12602
12603 PIPE_CONF_CHECK_I(min_voltage_level);
12604
12605 PIPE_CONF_CHECK_X(infoframes.enable);
12606 PIPE_CONF_CHECK_X(infoframes.gcp);
12607 PIPE_CONF_CHECK_INFOFRAME(avi);
12608 PIPE_CONF_CHECK_INFOFRAME(spd);
12609 PIPE_CONF_CHECK_INFOFRAME(hdmi);
12610 PIPE_CONF_CHECK_INFOFRAME(drm);
12611
12612 #undef PIPE_CONF_CHECK_X
12613 #undef PIPE_CONF_CHECK_I
12614 #undef PIPE_CONF_CHECK_BOOL
12615 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12616 #undef PIPE_CONF_CHECK_P
12617 #undef PIPE_CONF_CHECK_FLAGS
12618 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12619 #undef PIPE_CONF_QUIRK
12620
12621 return ret;
12622 }
12623
12624 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12625 const struct intel_crtc_state *pipe_config)
12626 {
12627 if (pipe_config->has_pch_encoder) {
12628 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12629 &pipe_config->fdi_m_n);
12630 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12631
12632 /*
12633 * FDI already provided one idea for the dotclock.
12634 * Yell if the encoder disagrees.
12635 */
12636 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12637 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12638 fdi_dotclock, dotclock);
12639 }
12640 }
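/*
 * Worked example with hypothetical numbers (not in the original
 * source): for an FDI link frequency of 270000 kHz and a readout of
 * link_m = 22, link_n = 24, intel_dotclock_calculate() gives
 * 270000 * 22 / 24 = 247500 kHz. intel_fuzzy_clock_check() then only
 * lets the WARN above fire if the encoder's idea of the dotclock
 * deviates from that by more than a few percent.
 */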
12641
12642 static void verify_wm_state(struct drm_crtc *crtc,
12643 struct drm_crtc_state *new_state)
12644 {
12645 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12646 struct skl_hw_state {
12647 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12648 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12649 struct skl_ddb_allocation ddb;
12650 struct skl_pipe_wm wm;
12651 } *hw;
12652 struct skl_ddb_allocation *sw_ddb;
12653 struct skl_pipe_wm *sw_wm;
12654 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12656 const enum pipe pipe = intel_crtc->pipe;
12657 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12658
12659 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12660 return;
12661
12662 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12663 if (!hw)
12664 return;
12665
12666 skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
12667 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12668
12669 skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
12670
12671 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12672 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12673
12674 if (INTEL_GEN(dev_priv) >= 11 &&
12675 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12676 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12677 sw_ddb->enabled_slices,
12678 hw->ddb.enabled_slices);
12679
12680 /* planes */
12681 for_each_universal_plane(dev_priv, pipe, plane) {
12682 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12683
12684 hw_plane_wm = &hw->wm.planes[plane];
12685 sw_plane_wm = &sw_wm->planes[plane];
12686
12687 /* Watermarks */
12688 for (level = 0; level <= max_level; level++) {
12689 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12690 &sw_plane_wm->wm[level]))
12691 continue;
12692
12693 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12694 pipe_name(pipe), plane + 1, level,
12695 sw_plane_wm->wm[level].plane_en,
12696 sw_plane_wm->wm[level].plane_res_b,
12697 sw_plane_wm->wm[level].plane_res_l,
12698 hw_plane_wm->wm[level].plane_en,
12699 hw_plane_wm->wm[level].plane_res_b,
12700 hw_plane_wm->wm[level].plane_res_l);
12701 }
12702
12703 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12704 &sw_plane_wm->trans_wm)) {
12705 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12706 pipe_name(pipe), plane + 1,
12707 sw_plane_wm->trans_wm.plane_en,
12708 sw_plane_wm->trans_wm.plane_res_b,
12709 sw_plane_wm->trans_wm.plane_res_l,
12710 hw_plane_wm->trans_wm.plane_en,
12711 hw_plane_wm->trans_wm.plane_res_b,
12712 hw_plane_wm->trans_wm.plane_res_l);
12713 }
12714
12715 /* DDB */
12716 hw_ddb_entry = &hw->ddb_y[plane];
12717 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12718
12719 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12720 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12721 pipe_name(pipe), plane + 1,
12722 sw_ddb_entry->start, sw_ddb_entry->end,
12723 hw_ddb_entry->start, hw_ddb_entry->end);
12724 }
12725 }
12726
12727 /*
12728 * cursor
12729 * If the cursor plane isn't active, we may not have updated its ddb
12730 * allocation. In that case, since the ddb allocation will be updated
12731 * once the plane becomes visible, we can skip this check.
12732 */
12733 if (1) {
12734 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12735
12736 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12737 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12738
12739 /* Watermarks */
12740 for (level = 0; level <= max_level; level++) {
12741 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12742 &sw_plane_wm->wm[level]))
12743 continue;
12744
12745 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12746 pipe_name(pipe), level,
12747 sw_plane_wm->wm[level].plane_en,
12748 sw_plane_wm->wm[level].plane_res_b,
12749 sw_plane_wm->wm[level].plane_res_l,
12750 hw_plane_wm->wm[level].plane_en,
12751 hw_plane_wm->wm[level].plane_res_b,
12752 hw_plane_wm->wm[level].plane_res_l);
12753 }
12754
12755 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12756 &sw_plane_wm->trans_wm)) {
12757 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12758 pipe_name(pipe),
12759 sw_plane_wm->trans_wm.plane_en,
12760 sw_plane_wm->trans_wm.plane_res_b,
12761 sw_plane_wm->trans_wm.plane_res_l,
12762 hw_plane_wm->trans_wm.plane_en,
12763 hw_plane_wm->trans_wm.plane_res_b,
12764 hw_plane_wm->trans_wm.plane_res_l);
12765 }
12766
12767 /* DDB */
12768 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
12769 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12770
12771 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12772 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12773 pipe_name(pipe),
12774 sw_ddb_entry->start, sw_ddb_entry->end,
12775 hw_ddb_entry->start, hw_ddb_entry->end);
12776 }
12777 }
12778
12779 kfree(hw);
12780 }
12781
12782 static void
12783 verify_connector_state(struct drm_device *dev,
12784 struct drm_atomic_state *state,
12785 struct drm_crtc *crtc)
12786 {
12787 struct drm_connector *connector;
12788 struct drm_connector_state *new_conn_state;
12789 int i;
12790
12791 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12792 struct drm_encoder *encoder = connector->encoder;
12793 struct drm_crtc_state *crtc_state = NULL;
12794
12795 if (new_conn_state->crtc != crtc)
12796 continue;
12797
12798 if (crtc)
12799 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12800
12801 intel_connector_verify_state(crtc_state, new_conn_state);
12802
12803 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12804 "connector's atomic encoder doesn't match legacy encoder\n");
12805 }
12806 }
12807
12808 static void
12809 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12810 {
12811 struct intel_encoder *encoder;
12812 struct drm_connector *connector;
12813 struct drm_connector_state *old_conn_state, *new_conn_state;
12814 int i;
12815
12816 for_each_intel_encoder(dev, encoder) {
12817 bool enabled = false, found = false;
12818 enum pipe pipe;
12819
12820 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12821 encoder->base.base.id,
12822 encoder->base.name);
12823
12824 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12825 new_conn_state, i) {
12826 if (old_conn_state->best_encoder == &encoder->base)
12827 found = true;
12828
12829 if (new_conn_state->best_encoder != &encoder->base)
12830 continue;
12831 found = enabled = true;
12832
12833 I915_STATE_WARN(new_conn_state->crtc !=
12834 encoder->base.crtc,
12835 "connector's crtc doesn't match encoder crtc\n");
12836 }
12837
12838 if (!found)
12839 continue;
12840
12841 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12842 "encoder's enabled state mismatch "
12843 "(expected %i, found %i)\n",
12844 !!encoder->base.crtc, enabled);
12845
12846 if (!encoder->base.crtc) {
12847 bool active;
12848
12849 active = encoder->get_hw_state(encoder, &pipe);
12850 I915_STATE_WARN(active,
12851 "encoder detached but still enabled on pipe %c.\n",
12852 pipe_name(pipe));
12853 }
12854 }
12855 }
12856
12857 static void
12858 verify_crtc_state(struct drm_crtc *crtc,
12859 struct drm_crtc_state *old_crtc_state,
12860 struct drm_crtc_state *new_crtc_state)
12861 {
12862 struct drm_device *dev = crtc->dev;
12863 struct drm_i915_private *dev_priv = to_i915(dev);
12864 struct intel_encoder *encoder;
12865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12866 struct intel_crtc_state *pipe_config, *sw_config;
12867 struct drm_atomic_state *old_state;
12868 bool active;
12869
12870 old_state = old_crtc_state->state;
12871 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12872 pipe_config = to_intel_crtc_state(old_crtc_state);
12873 memset(pipe_config, 0, sizeof(*pipe_config));
12874 pipe_config->base.crtc = crtc;
12875 pipe_config->base.state = old_state;
12876
12877 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12878
12879 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12880
12881 /* we keep both pipes enabled on 830 */
12882 if (IS_I830(dev_priv))
12883 active = new_crtc_state->active;
12884
12885 I915_STATE_WARN(new_crtc_state->active != active,
12886 "crtc active state doesn't match with hw state "
12887 "(expected %i, found %i)\n", new_crtc_state->active, active);
12888
12889 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12890 "transitional active state does not match atomic hw state "
12891 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12892
12893 for_each_encoder_on_crtc(dev, crtc, encoder) {
12894 enum pipe pipe;
12895
12896 active = encoder->get_hw_state(encoder, &pipe);
12897 I915_STATE_WARN(active != new_crtc_state->active,
12898 "[ENCODER:%i] active %i with crtc active %i\n",
12899 encoder->base.base.id, active, new_crtc_state->active);
12900
12901 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12902 "Encoder connected to wrong pipe %c\n",
12903 pipe_name(pipe));
12904
12905 if (active)
12906 encoder->get_config(encoder, pipe_config);
12907 }
12908
12909 intel_crtc_compute_pixel_rate(pipe_config);
12910
12911 if (!new_crtc_state->active)
12912 return;
12913
12914 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12915
12916 sw_config = to_intel_crtc_state(new_crtc_state);
12917 if (!intel_pipe_config_compare(dev_priv, sw_config,
12918 pipe_config, false)) {
12919 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12920 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
12921 intel_dump_pipe_config(sw_config, NULL, "[sw state]");
12922 }
12923 }
12924
12925 static void
12926 intel_verify_planes(struct intel_atomic_state *state)
12927 {
12928 struct intel_plane *plane;
12929 const struct intel_plane_state *plane_state;
12930 int i;
12931
12932 for_each_new_intel_plane_in_state(state, plane,
12933 plane_state, i)
12934 assert_plane(plane, plane_state->slave ||
12935 plane_state->base.visible);
12936 }
12937
12938 static void
12939 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12940 struct intel_shared_dpll *pll,
12941 struct drm_crtc *crtc,
12942 struct drm_crtc_state *new_state)
12943 {
12944 struct intel_dpll_hw_state dpll_hw_state;
12945 unsigned int crtc_mask;
12946 bool active;
12947
12948 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12949
12950 DRM_DEBUG_KMS("%s\n", pll->info->name);
12951
12952 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12953
12954 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12955 I915_STATE_WARN(!pll->on && pll->active_mask,
12956 "pll in active use but not on in sw tracking\n");
12957 I915_STATE_WARN(pll->on && !pll->active_mask,
12958 "pll is on but not used by any active crtc\n");
12959 I915_STATE_WARN(pll->on != active,
12960 "pll on state mismatch (expected %i, found %i)\n",
12961 pll->on, active);
12962 }
12963
12964 if (!crtc) {
12965 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12966 "more active pll users than references: %x vs %x\n",
12967 pll->active_mask, pll->state.crtc_mask);
12968
12969 return;
12970 }
12971
12972 crtc_mask = drm_crtc_mask(crtc);
12973
12974 if (new_state->active)
12975 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12976 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12977 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12978 else
12979 I915_STATE_WARN(pll->active_mask & crtc_mask,
12980 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12981 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12982
12983 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12984 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12985 crtc_mask, pll->state.crtc_mask);
12986
12987 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12988 &dpll_hw_state,
12989 sizeof(dpll_hw_state)),
12990 "pll hw state mismatch\n");
12991 }
12992
12993 static void
12994 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12995 struct drm_crtc_state *old_crtc_state,
12996 struct drm_crtc_state *new_crtc_state)
12997 {
12998 struct drm_i915_private *dev_priv = to_i915(dev);
12999 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13000 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13001
13002 if (new_state->shared_dpll)
13003 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13004
13005 if (old_state->shared_dpll &&
13006 old_state->shared_dpll != new_state->shared_dpll) {
13007 unsigned int crtc_mask = drm_crtc_mask(crtc);
13008 struct intel_shared_dpll *pll = old_state->shared_dpll;
13009
13010 I915_STATE_WARN(pll->active_mask & crtc_mask,
13011 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13012 pipe_name(drm_crtc_index(crtc)));
13013 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13014 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13015 pipe_name(drm_crtc_index(crtc)));
13016 }
13017 }
13018
13019 static void
13020 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13021 struct drm_atomic_state *state,
13022 struct drm_crtc_state *old_state,
13023 struct drm_crtc_state *new_state)
13024 {
13025 if (!needs_modeset(new_state) &&
13026 !to_intel_crtc_state(new_state)->update_pipe)
13027 return;
13028
13029 verify_wm_state(crtc, new_state);
13030 verify_connector_state(crtc->dev, state, crtc);
13031 verify_crtc_state(crtc, old_state, new_state);
13032 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13033 }
13034
13035 static void
13036 verify_disabled_dpll_state(struct drm_device *dev)
13037 {
13038 struct drm_i915_private *dev_priv = to_i915(dev);
13039 int i;
13040
13041 for (i = 0; i < dev_priv->num_shared_dpll; i++)
13042 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13043 }
13044
13045 static void
13046 intel_modeset_verify_disabled(struct drm_device *dev,
13047 struct drm_atomic_state *state)
13048 {
13049 verify_encoder_state(dev, state);
13050 verify_connector_state(dev, state, NULL);
13051 verify_disabled_dpll_state(dev);
13052 }
13053
13054 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13055 {
13056 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13057 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13058
13059 /*
13060 * The scanline counter increments at the leading edge of hsync.
13061 *
13062 * On most platforms it starts counting from vtotal-1 on the
13063 * first active line. That means the scanline counter value is
13064 * always one less than what we would expect. Ie. just after
13065 * start of vblank, which also occurs at start of hsync (on the
13066 * last active line), the scanline counter will read vblank_start-1.
13067 *
13068 * On gen2 the scanline counter starts counting from 1 instead
13069 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13070 * to keep the value positive), instead of adding one.
13071 *
13072 * On HSW+ the behaviour of the scanline counter depends on the output
13073 * type. For DP ports it behaves like most other platforms, but on HDMI
13074 * there's an extra 1 line difference. So we need to add two instead of
13075 * one to the value.
13076 *
13077 * On VLV/CHV DSI the scanline counter would appear to increment
13078 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13079 * that means we can't tell whether we're in vblank or not while
13080 * we're on that particular line. We must still set scanline_offset
13081 * to 1 so that the vblank timestamps come out correct when we query
13082 * the scanline counter from within the vblank interrupt handler.
13083 * However if queried just before the start of vblank we'll get an
13084 * answer that's slightly in the future.
13085 */
13086 if (IS_GEN(dev_priv, 2)) {
13087 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13088 int vtotal;
13089
13090 vtotal = adjusted_mode->crtc_vtotal;
13091 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13092 vtotal /= 2;
13093
13094 crtc->scanline_offset = vtotal - 1;
13095 } else if (HAS_DDI(dev_priv) &&
13096 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13097 crtc->scanline_offset = 2;
13098 } else
13099 crtc->scanline_offset = 1;
13100 }
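/*
 * Worked example with illustrative numbers (not in the original
 * source): a gen2 progressive mode with crtc_vtotal = 500 gets
 * scanline_offset = 499; adding 499 modulo 500 to the raw counter is
 * the same as subtracting 1, which re-aligns a counter that starts at
 * 1 instead of vtotal-1. HSW+ HDMI uses an offset of 2 for the extra
 * line of difference, everything else uses 1.
 */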
13101
13102 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13103 {
13104 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13105 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13106 struct intel_crtc *crtc;
13107 int i;
13108
13109 if (!dev_priv->display.crtc_compute_clock)
13110 return;
13111
13112 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13113 new_crtc_state, i) {
13114 struct intel_shared_dpll *old_dpll =
13115 old_crtc_state->shared_dpll;
13116
13117 if (!needs_modeset(&new_crtc_state->base))
13118 continue;
13119
13120 new_crtc_state->shared_dpll = NULL;
13121
13122 if (!old_dpll)
13123 continue;
13124
13125 intel_release_shared_dpll(old_dpll, crtc, &state->base);
13126 }
13127 }
13128
13129 /*
13130 * This implements the workaround described in the "notes" section of the mode
13131 * set sequence documentation. When going from no pipes or single pipe to
13132 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13133 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13134 */
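/*
 * Illustrative scenario (not in the original source): pipe A is the
 * only pipe running and this modeset enables pipe B. The helper below
 * then sets first_crtc_state->hsw_workaround_pipe = PIPE_A, and the
 * crtc enable path uses that to wait the required two vblanks on pipe
 * A before enabling planes on pipe B.
 */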
13135 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13136 {
13137 struct intel_crtc_state *crtc_state;
13138 struct intel_crtc *crtc;
13139 struct intel_crtc_state *first_crtc_state = NULL;
13140 struct intel_crtc_state *other_crtc_state = NULL;
13141 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13142 int i;
13143
13144 /* look at all crtcs that are going to be enabled during the modeset */
13145 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13146 if (!crtc_state->base.active ||
13147 !needs_modeset(&crtc_state->base))
13148 continue;
13149
13150 if (first_crtc_state) {
13151 other_crtc_state = crtc_state;
13152 break;
13153 } else {
13154 first_crtc_state = crtc_state;
13155 first_pipe = crtc->pipe;
13156 }
13157 }
13158
13159 /* No workaround needed? */
13160 if (!first_crtc_state)
13161 return 0;
13162
13163 /* w/a possibly needed, check how many crtcs are already enabled. */
13164 for_each_intel_crtc(state->base.dev, crtc) {
13165 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13166 if (IS_ERR(crtc_state))
13167 return PTR_ERR(crtc_state);
13168
13169 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13170
13171 if (!crtc_state->base.active ||
13172 needs_modeset(&crtc_state->base))
13173 continue;
13174
13175 /* 2 or more enabled crtcs means no need for w/a */
13176 if (enabled_pipe != INVALID_PIPE)
13177 return 0;
13178
13179 enabled_pipe = crtc->pipe;
13180 }
13181
13182 if (enabled_pipe != INVALID_PIPE)
13183 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13184 else if (other_crtc_state)
13185 other_crtc_state->hsw_workaround_pipe = first_pipe;
13186
13187 return 0;
13188 }
13189
13190 static int intel_lock_all_pipes(struct drm_atomic_state *state)
13191 {
13192 struct drm_crtc *crtc;
13193
13194 /* Add all pipes to the state */
13195 for_each_crtc(state->dev, crtc) {
13196 struct drm_crtc_state *crtc_state;
13197
13198 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13199 if (IS_ERR(crtc_state))
13200 return PTR_ERR(crtc_state);
13201 }
13202
13203 return 0;
13204 }
13205
13206 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13207 {
13208 struct drm_crtc *crtc;
13209
13210 /*
13211 * Add all pipes to the state, and force
13212 * a modeset on all the active ones.
13213 */
13214 for_each_crtc(state->dev, crtc) {
13215 struct drm_crtc_state *crtc_state;
13216 int ret;
13217
13218 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13219 if (IS_ERR(crtc_state))
13220 return PTR_ERR(crtc_state);
13221
13222 if (!crtc_state->active || needs_modeset(crtc_state))
13223 continue;
13224
13225 crtc_state->mode_changed = true;
13226
13227 ret = drm_atomic_add_affected_connectors(state, crtc);
13228 if (ret)
13229 return ret;
13230
13231 ret = drm_atomic_add_affected_planes(state, crtc);
13232 if (ret)
13233 return ret;
13234 }
13235
13236 return 0;
13237 }
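/*
 * Illustrative note (not in the original source): intel_lock_all_pipes()
 * only adds every crtc to the state, serializing against concurrent
 * commits without forcing a modeset, while intel_modeset_all_pipes()
 * above additionally flags every active, not-yet-modesetting crtc (and
 * its connectors and planes) for a full modeset, as needed when cdclk
 * has to change.
 */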
13238
13239 static int intel_modeset_checks(struct intel_atomic_state *state)
13240 {
13241 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13242 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13243 struct intel_crtc *crtc;
13244 int ret = 0, i;
13245
13246 if (!check_digital_port_conflicts(state)) {
13247 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13248 return -EINVAL;
13249 }
13250
13251 /* keep the current setting */
13252 if (!state->cdclk.force_min_cdclk_changed)
13253 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13254
13255 state->modeset = true;
13256 state->active_crtcs = dev_priv->active_crtcs;
13257 state->cdclk.logical = dev_priv->cdclk.logical;
13258 state->cdclk.actual = dev_priv->cdclk.actual;
13259 state->cdclk.pipe = INVALID_PIPE;
13260
13261 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13262 new_crtc_state, i) {
13263 if (new_crtc_state->base.active)
13264 state->active_crtcs |= 1 << i;
13265 else
13266 state->active_crtcs &= ~(1 << i);
13267
13268 if (old_crtc_state->base.active != new_crtc_state->base.active)
13269 state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
13270 }
13271
13272 /*
13273 * See if the config requires any additional preparation, e.g.
13274 * to adjust global state with pipes off. We need to do this
13275 * here so we can get the modeset_pipe updated config for the new
13276 * mode set on this crtc. For other crtcs we need to use the
13277 * adjusted_mode bits in the crtc directly.
13278 */
13279 if (dev_priv->display.modeset_calc_cdclk) {
13280 enum pipe pipe;
13281
13282 ret = dev_priv->display.modeset_calc_cdclk(state);
13283 if (ret < 0)
13284 return ret;
13285
13286 /*
13287 * Writes to dev_priv->cdclk.logical must be protected by
13288 * holding all the crtc locks, even if we don't end up
13289 * touching the hardware
13290 */
13291 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13292 &state->cdclk.logical)) {
13293 ret = intel_lock_all_pipes(&state->base);
13294 if (ret < 0)
13295 return ret;
13296 }
13297
13298 if (is_power_of_2(state->active_crtcs)) {
13299 struct drm_crtc *crtc;
13300 struct drm_crtc_state *crtc_state;
13301
13302 pipe = ilog2(state->active_crtcs);
13303 crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
13304 crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
13305 if (crtc_state && needs_modeset(crtc_state))
13306 pipe = INVALID_PIPE;
13307 } else {
13308 pipe = INVALID_PIPE;
13309 }
13310
13311 /* All pipes must be switched off while we change the cdclk. */
13312 if (pipe != INVALID_PIPE &&
13313 intel_cdclk_needs_cd2x_update(dev_priv,
13314 &dev_priv->cdclk.actual,
13315 &state->cdclk.actual)) {
13316 ret = intel_lock_all_pipes(&state->base);
13317 if (ret < 0)
13318 return ret;
13319
13320 state->cdclk.pipe = pipe;
13321 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13322 &state->cdclk.actual)) {
13323 ret = intel_modeset_all_pipes(&state->base);
13324 if (ret < 0)
13325 return ret;
13326
13327 state->cdclk.pipe = INVALID_PIPE;
13328 }
13329
13330 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13331 state->cdclk.logical.cdclk,
13332 state->cdclk.actual.cdclk);
13333 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13334 state->cdclk.logical.voltage_level,
13335 state->cdclk.actual.voltage_level);
13336 }
13337
13338 intel_modeset_clear_plls(state);
13339
13340 if (IS_HASWELL(dev_priv))
13341 return haswell_mode_set_planes_workaround(state);
13342
13343 return 0;
13344 }
13345
13346 /*
13347 * Handle calculation of various watermark data at the end of the atomic check
13348 * phase. The code here should be run after the per-crtc and per-plane 'check'
13349 * handlers to ensure that all derived state has been updated.
13350 */
13351 static int calc_watermark_data(struct intel_atomic_state *state)
13352 {
13353 struct drm_device *dev = state->base.dev;
13354 struct drm_i915_private *dev_priv = to_i915(dev);
13355
13356 /* Is there platform-specific watermark information to calculate? */
13357 if (dev_priv->display.compute_global_watermarks)
13358 return dev_priv->display.compute_global_watermarks(state);
13359
13360 return 0;
13361 }
13362
13363 /**
13364 * intel_atomic_check - validate state object
13365 * @dev: drm device
13366 * @state: state to validate
13367 */
13368 static int intel_atomic_check(struct drm_device *dev,
13369 struct drm_atomic_state *_state)
13370 {
13371 struct drm_i915_private *dev_priv = to_i915(dev);
13372 struct intel_atomic_state *state = to_intel_atomic_state(_state);
13373 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13374 struct intel_crtc *crtc;
13375 int ret, i;
13376 bool any_ms = state->cdclk.force_min_cdclk_changed;
13377
13378 /* Catch I915_MODE_FLAG_INHERITED */
13379 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13380 new_crtc_state, i) {
13381 if (new_crtc_state->base.mode.private_flags !=
13382 old_crtc_state->base.mode.private_flags)
13383 new_crtc_state->base.mode_changed = true;
13384 }
13385
13386 ret = drm_atomic_helper_check_modeset(dev, &state->base);
13387 if (ret)
13388 goto fail;
13389
13390 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13391 new_crtc_state, i) {
13392 if (!needs_modeset(&new_crtc_state->base))
13393 continue;
13394
13395 if (!new_crtc_state->base.enable) {
13396 any_ms = true;
13397 continue;
13398 }
13399
13400 ret = intel_modeset_pipe_config(new_crtc_state);
13401 if (ret)
13402 goto fail;
13403
13404 if (intel_pipe_config_compare(dev_priv, old_crtc_state,
13405 new_crtc_state, true)) {
13406 new_crtc_state->base.mode_changed = false;
13407 new_crtc_state->update_pipe = true;
13408 }
13409
13410 if (needs_modeset(&new_crtc_state->base))
13411 any_ms = true;
13412 }
13413
13414 ret = drm_dp_mst_atomic_check(&state->base);
13415 if (ret)
13416 goto fail;
13417
13418 if (any_ms) {
13419 ret = intel_modeset_checks(state);
13420 if (ret)
13421 goto fail;
13422 } else {
13423 state->cdclk.logical = dev_priv->cdclk.logical;
13424 }
13425
13426 ret = icl_add_linked_planes(state);
13427 if (ret)
13428 goto fail;
13429
13430 ret = drm_atomic_helper_check_planes(dev, &state->base);
13431 if (ret)
13432 goto fail;
13433
13434 intel_fbc_choose_crtc(dev_priv, state);
13435 ret = calc_watermark_data(state);
13436 if (ret)
13437 goto fail;
13438
13439 ret = intel_bw_atomic_check(state);
13440 if (ret)
13441 goto fail;
13442
13443 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13444 new_crtc_state, i) {
13445 if (!needs_modeset(&new_crtc_state->base) &&
13446 !new_crtc_state->update_pipe)
13447 continue;
13448
13449 intel_dump_pipe_config(new_crtc_state, state,
13450 needs_modeset(&new_crtc_state->base) ?
13451 "[modeset]" : "[fastset]");
13452 }
13453
13454 return 0;
13455
13456 fail:
13457 if (ret == -EDEADLK)
13458 return ret;
13459
13460 /*
13461 * FIXME would probably be nice to know which crtc specifically
13462 * caused the failure, in cases where we can pinpoint it.
13463 */
13464 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13465 new_crtc_state, i)
13466 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
13467
13468 return ret;
13469 }
13470
13471 static int intel_atomic_prepare_commit(struct drm_device *dev,
13472 struct drm_atomic_state *state)
13473 {
13474 return drm_atomic_helper_prepare_planes(dev, state);
13475 }
13476
13477 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13478 {
13479 struct drm_device *dev = crtc->base.dev;
13480 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13481
13482 if (!vblank->max_vblank_count)
13483 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13484
13485 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13486 }
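/*
 * Illustrative note (not in the original source): max_vblank_count == 0
 * means the hardware frame counter can't be trusted for this crtc (it
 * may not exist or may reset behind our back), so the helper above
 * falls back to the software vblank count maintained via
 * drm_crtc_accurate_vblank_count().
 */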
13487
13488 static void intel_update_crtc(struct drm_crtc *crtc,
13489 struct drm_atomic_state *state,
13490 struct drm_crtc_state *old_crtc_state,
13491 struct drm_crtc_state *new_crtc_state)
13492 {
13493 struct drm_device *dev = crtc->dev;
13494 struct drm_i915_private *dev_priv = to_i915(dev);
13495 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13496 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13497 bool modeset = needs_modeset(new_crtc_state);
13498 struct intel_plane_state *new_plane_state =
13499 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13500 to_intel_plane(crtc->primary));
13501
13502 if (modeset) {
13503 update_scanline_offset(pipe_config);
13504 dev_priv->display.crtc_enable(pipe_config, state);
13505
13506 /* vblanks work again, re-enable pipe CRC. */
13507 intel_crtc_enable_pipe_crc(intel_crtc);
13508 } else {
13509 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13510 pipe_config);
13511
13512 if (pipe_config->update_pipe)
13513 intel_encoders_update_pipe(crtc, pipe_config, state);
13514 }
13515
13516 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13517 intel_fbc_disable(intel_crtc);
13518 else if (new_plane_state)
13519 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
13520
13521 intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13522
13523 if (INTEL_GEN(dev_priv) >= 9)
13524 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13525 else
13526 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13527
13528 intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13529 }
13530
13531 static void intel_update_crtcs(struct drm_atomic_state *state)
13532 {
13533 struct drm_crtc *crtc;
13534 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13535 int i;
13536
13537 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13538 if (!new_crtc_state->active)
13539 continue;
13540
13541 intel_update_crtc(crtc, state, old_crtc_state,
13542 new_crtc_state);
13543 }
13544 }
13545
13546 static void skl_update_crtcs(struct drm_atomic_state *state)
13547 {
13548 struct drm_i915_private *dev_priv = to_i915(state->dev);
13549 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13550 struct drm_crtc *crtc;
13551 struct intel_crtc *intel_crtc;
13552 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13553 struct intel_crtc_state *cstate;
13554 unsigned int updated = 0;
13555 bool progress;
13556 enum pipe pipe;
13557 int i;
13558 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13559 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13560 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13561
13562 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13563 /* ignore allocations for crtcs that have been turned off. */
13564 if (new_crtc_state->active)
13565 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13566
13567 /* If a 2nd DBuf slice is required, enable it here */
13568 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13569 icl_dbuf_slices_update(dev_priv, required_slices);
13570
13571 /*
13572 * Whenever the number of active pipes changes, we need to make sure we
13573 * update the pipes in the right order so that their ddb allocations
13574 * never overlap with each other in between CRTC updates. Otherwise we'll
13575 * cause pipe underruns and other bad stuff.
13576 */
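/*
 * Illustrative example (not in the original source): going from one
 * active pipe to two, pipe A may shrink from DDB blocks [0, 512) to
 * [0, 256) while new pipe B takes [256, 512). Pipe A must complete its
 * update (plus a vblank wait, so the smaller allocation takes effect)
 * before pipe B may be enabled; the retry loop below walks the pipes
 * until every update has been committed in such a safe order.
 */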
13577 do {
13578 progress = false;
13579
13580 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13581 bool vbl_wait = false;
13582 unsigned int cmask = drm_crtc_mask(crtc);
13583
13584 intel_crtc = to_intel_crtc(crtc);
13585 cstate = to_intel_crtc_state(new_crtc_state);
13586 pipe = intel_crtc->pipe;
13587
13588 if (updated & cmask || !cstate->base.active)
13589 continue;
13590
13591 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13592 entries,
13593 INTEL_INFO(dev_priv)->num_pipes, i))
13594 continue;
13595
13596 updated |= cmask;
13597 entries[i] = cstate->wm.skl.ddb;
13598
13599 /*
13600 * If this is an already active pipe, its DDB changed,
13601 * and this isn't the last pipe that needs updating,
13602 * then we need to wait for a vblank to pass for the
13603 * new ddb allocation to take effect.
13604 */
13605 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13606 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13607 !new_crtc_state->active_changed &&
13608 intel_state->wm_results.dirty_pipes != updated)
13609 vbl_wait = true;
13610
13611 intel_update_crtc(crtc, state, old_crtc_state,
13612 new_crtc_state);
13613
13614 if (vbl_wait)
13615 intel_wait_for_vblank(dev_priv, pipe);
13616
13617 progress = true;
13618 }
13619 } while (progress);
13620
13621 /* If the 2nd DBuf slice is no longer required, disable it */
13622 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13623 icl_dbuf_slices_update(dev_priv, required_slices);
13624 }
13625
13626 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13627 {
13628 struct intel_atomic_state *state, *next;
13629 struct llist_node *freed;
13630
13631 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13632 llist_for_each_entry_safe(state, next, freed, freed)
13633 drm_atomic_state_put(&state->base);
13634 }
13635
13636 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13637 {
13638 struct drm_i915_private *dev_priv =
13639 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13640
13641 intel_atomic_helper_free_state(dev_priv);
13642 }
13643
13644 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13645 {
13646 struct wait_queue_entry wait_fence, wait_reset;
13647 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13648
13649 init_wait_entry(&wait_fence, 0);
13650 init_wait_entry(&wait_reset, 0);
13651 for (;;) {
13652 prepare_to_wait(&intel_state->commit_ready.wait,
13653 &wait_fence, TASK_UNINTERRUPTIBLE);
13654 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13655 &wait_reset, TASK_UNINTERRUPTIBLE);
13656
13657
13658 if (i915_sw_fence_done(&intel_state->commit_ready)
13659 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13660 break;
13661
13662 schedule();
13663 }
13664 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13665 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13666 }
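/*
 * Illustrative note (not in the original source): the wait above parks
 * itself on two waitqueues at once and completes when either the commit
 * fence signals or I915_RESET_MODESET is flagged, so a GPU reset that
 * needs to take over the display cannot deadlock against a commit that
 * is still waiting for fences.
 */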
13667
13668 static void intel_atomic_cleanup_work(struct work_struct *work)
13669 {
13670 struct drm_atomic_state *state =
13671 container_of(work, struct drm_atomic_state, commit_work);
13672 struct drm_i915_private *i915 = to_i915(state->dev);
13673
13674 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13675 drm_atomic_helper_commit_cleanup_done(state);
13676 drm_atomic_state_put(state);
13677
13678 intel_atomic_helper_free_state(i915);
13679 }
13680
13681 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13682 {
13683 struct drm_device *dev = state->dev;
13684 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13685 struct drm_i915_private *dev_priv = to_i915(dev);
13686 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13687 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13688 struct drm_crtc *crtc;
13689 struct intel_crtc *intel_crtc;
13690 u64 put_domains[I915_MAX_PIPES] = {};
13691 intel_wakeref_t wakeref = 0;
13692 int i;
13693
13694 intel_atomic_commit_fence_wait(intel_state);
13695
13696 drm_atomic_helper_wait_for_dependencies(state);
13697
13698 if (intel_state->modeset)
13699 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13700
13701 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13702 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13703 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13704 intel_crtc = to_intel_crtc(crtc);
13705
13706 if (needs_modeset(new_crtc_state) ||
13707 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13708
13709 put_domains[intel_crtc->pipe] =
13710 modeset_get_crtc_power_domains(crtc,
13711 new_intel_crtc_state);
13712 }
13713
13714 if (!needs_modeset(new_crtc_state))
13715 continue;
13716
13717 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13718
13719 if (old_crtc_state->active) {
13720 intel_crtc_disable_planes(intel_state, intel_crtc);
13721
13722 /*
13723 * We need to disable pipe CRC before disabling the pipe,
13724 * or we race against vblank off.
13725 */
13726 intel_crtc_disable_pipe_crc(intel_crtc);
13727
13728 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13729 intel_crtc->active = false;
13730 intel_fbc_disable(intel_crtc);
13731 intel_disable_shared_dpll(old_intel_crtc_state);
13732
13733 /*
13734 * Underruns don't always raise
13735 * interrupts, so check manually.
13736 */
13737 intel_check_cpu_fifo_underruns(dev_priv);
13738 intel_check_pch_fifo_underruns(dev_priv);
13739
13740 /* FIXME unify this for all platforms */
13741 if (!new_crtc_state->active &&
13742 !HAS_GMCH(dev_priv) &&
13743 dev_priv->display.initial_watermarks)
13744 dev_priv->display.initial_watermarks(intel_state,
13745 new_intel_crtc_state);
13746 }
13747 }
13748
13749 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13750 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13751 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13752
13753 if (intel_state->modeset) {
13754 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13755
13756 intel_set_cdclk_pre_plane_update(dev_priv,
13757 &intel_state->cdclk.actual,
13758 &dev_priv->cdclk.actual,
13759 intel_state->cdclk.pipe);
13760
13761 /*
13762 * SKL workaround: bspec recommends we disable the SAGV when we
13763 * have more than one pipe enabled
13764 */
13765 if (!intel_can_enable_sagv(state))
13766 intel_disable_sagv(dev_priv);
13767
13768 intel_modeset_verify_disabled(dev, state);
13769 }
13770
13771 /* Complete the events for pipes that have now been disabled */
13772 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13773 bool modeset = needs_modeset(new_crtc_state);
13774
13775 /* Complete events for now-disabled pipes here. */
13776 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13777 spin_lock_irq(&dev->event_lock);
13778 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13779 spin_unlock_irq(&dev->event_lock);
13780
13781 new_crtc_state->event = NULL;
13782 }
13783 }
13784
13785 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13786 dev_priv->display.update_crtcs(state);
13787
13788 if (intel_state->modeset)
13789 intel_set_cdclk_post_plane_update(dev_priv,
13790 &intel_state->cdclk.actual,
13791 &dev_priv->cdclk.actual,
13792 intel_state->cdclk.pipe);
13793
13794 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13795 * already, but still need the state for the delayed optimization. To
13796 * fix this:
13797 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13798 * - schedule that vblank worker _before_ calling hw_done
13799 * - at the start of commit_tail, cancel it _synchronously_
13800 * - switch over to the vblank wait helper in the core after that since
13801 * we don't need our special handling any more.
13802 */
13803 drm_atomic_helper_wait_for_flip_done(dev, state);
13804
13805 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13806 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13807
13808 if (new_crtc_state->active &&
13809 !needs_modeset(new_crtc_state) &&
13810 (new_intel_crtc_state->base.color_mgmt_changed ||
13811 new_intel_crtc_state->update_pipe))
13812 intel_color_load_luts(new_intel_crtc_state);
13813 }
13814
13815 /*
13816 * Now that the vblank has passed, we can go ahead and program the
13817 * optimal watermarks on platforms that need two-step watermark
13818 * programming.
13819 *
13820 * TODO: Move this (and other cleanup) to an async worker eventually.
13821 */
13822 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13823 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13824
13825 if (dev_priv->display.optimize_watermarks)
13826 dev_priv->display.optimize_watermarks(intel_state,
13827 new_intel_crtc_state);
13828 }
13829
13830 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13831 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13832
13833 if (put_domains[i])
13834 modeset_put_power_domains(dev_priv, put_domains[i]);
13835
13836 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13837 }
13838
13839 if (intel_state->modeset)
13840 intel_verify_planes(intel_state);
13841
13842 if (intel_state->modeset && intel_can_enable_sagv(state))
13843 intel_enable_sagv(dev_priv);
13844
13845 drm_atomic_helper_commit_hw_done(state);
13846
13847 if (intel_state->modeset) {
13848 /* As one of the primary mmio accessors, KMS has a high
13849 * likelihood of triggering bugs in unclaimed access. After we
13850 * finish modesetting, see if an error has been flagged, and if
13851 * so enable debugging for the next modeset - and hope we catch
13852 * the culprit.
13853 */
13854 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13855 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13856 }
13857 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
13858
13859 /*
13860 * Defer the cleanup of the old state to a separate worker so as not to
13861 * impede the current task (userspace, for blocking modesets) that is
13862 * executed inline. For out-of-line asynchronous modesets/flips,
13863 * deferring to a new worker seems overkill, but we would place a
13864 * schedule point (cond_resched()) here anyway to keep latencies
13865 * down.
13866 */
13867 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13868 queue_work(system_highpri_wq, &state->commit_work);
13869 }
13870
13871 static void intel_atomic_commit_work(struct work_struct *work)
13872 {
13873 struct drm_atomic_state *state =
13874 container_of(work, struct drm_atomic_state, commit_work);
13875
13876 intel_atomic_commit_tail(state);
13877 }
13878
13879 static int __i915_sw_fence_call
13880 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13881 enum i915_sw_fence_notify notify)
13882 {
13883 struct intel_atomic_state *state =
13884 container_of(fence, struct intel_atomic_state, commit_ready);
13885
13886 switch (notify) {
13887 case FENCE_COMPLETE:
13888 /* we do blocking waits in the worker, nothing to do here */
13889 break;
13890 case FENCE_FREE:
13891 {
13892 struct intel_atomic_helper *helper =
13893 &to_i915(state->base.dev)->atomic_helper;
13894
13895 if (llist_add(&state->freed, &helper->free_list))
13896 schedule_work(&helper->free_work);
13897 break;
13898 }
13899 }
13900
13901 return NOTIFY_DONE;
13902 }
13903
13904 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13905 {
13906 struct drm_plane_state *old_plane_state, *new_plane_state;
13907 struct drm_plane *plane;
13908 int i;
13909
13910 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13911 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13912 intel_fb_obj(new_plane_state->fb),
13913 to_intel_plane(plane)->frontbuffer_bit);
13914 }
13915
13916 /**
13917 * intel_atomic_commit - commit validated state object
13918 * @dev: DRM device
13919 * @state: the top-level driver state object
13920 * @nonblock: nonblocking commit
13921 *
13922 * This function commits a top-level state object that has been validated
13923 * with drm_atomic_helper_check().
13924 *
13925 * RETURNS
13926 * Zero for success or -errno.
13927 */
13928 static int intel_atomic_commit(struct drm_device *dev,
13929 struct drm_atomic_state *state,
13930 bool nonblock)
13931 {
13932 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13933 struct drm_i915_private *dev_priv = to_i915(dev);
13934 int ret = 0;
13935
13936 intel_state->wakeref = intel_runtime_pm_get(dev_priv);
13937
13938 drm_atomic_state_get(state);
13939 i915_sw_fence_init(&intel_state->commit_ready,
13940 intel_atomic_commit_ready);
13941
13942 /*
13943 * The intel_legacy_cursor_update() fast path takes care
13944 * of avoiding the vblank waits for simple cursor
13945 * movement and flips. For cursor on/off and size changes,
13946 * we want to perform the vblank waits so that watermark
13947 * updates happen during the correct frames. Gen9+ have
13948 * double buffered watermarks and so shouldn't need this.
13949 *
13950 * Unset state->legacy_cursor_update before the call to
13951 * drm_atomic_helper_setup_commit() because otherwise
13952 * drm_atomic_helper_wait_for_flip_done() is a noop and
13953 * we get FIFO underruns since we didn't wait
13954 * for vblank.
13955 *
13956 * FIXME doing watermarks and fb cleanup from a vblank worker
13957 * (assuming we had any) would solve these problems.
13958 */
13959 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13960 struct intel_crtc_state *new_crtc_state;
13961 struct intel_crtc *crtc;
13962 int i;
13963
13964 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13965 if (new_crtc_state->wm.need_postvbl_update ||
13966 new_crtc_state->update_wm_post)
13967 state->legacy_cursor_update = false;
13968 }
13969
13970 ret = intel_atomic_prepare_commit(dev, state);
13971 if (ret) {
13972 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13973 i915_sw_fence_commit(&intel_state->commit_ready);
13974 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
13975 return ret;
13976 }
13977
13978 ret = drm_atomic_helper_setup_commit(state, nonblock);
13979 if (!ret)
13980 ret = drm_atomic_helper_swap_state(state, true);
13981
13982 if (ret) {
13983 i915_sw_fence_commit(&intel_state->commit_ready);
13984
13985 drm_atomic_helper_cleanup_planes(dev, state);
13986 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
13987 return ret;
13988 }
13989 dev_priv->wm.distrust_bios_wm = false;
13990 intel_shared_dpll_swap_state(state);
13991 intel_atomic_track_fbs(state);
13992
13993 if (intel_state->modeset) {
13994 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13995 sizeof(intel_state->min_cdclk));
13996 memcpy(dev_priv->min_voltage_level,
13997 intel_state->min_voltage_level,
13998 sizeof(intel_state->min_voltage_level));
13999 dev_priv->active_crtcs = intel_state->active_crtcs;
14000 dev_priv->cdclk.force_min_cdclk =
14001 intel_state->cdclk.force_min_cdclk;
14002
14003 intel_cdclk_swap_state(intel_state);
14004 }
14005
14006 drm_atomic_state_get(state);
14007 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
14008
14009 i915_sw_fence_commit(&intel_state->commit_ready);
14010 if (nonblock && intel_state->modeset) {
14011 queue_work(dev_priv->modeset_wq, &state->commit_work);
14012 } else if (nonblock) {
14013 queue_work(system_unbound_wq, &state->commit_work);
14014 } else {
14015 if (intel_state->modeset)
14016 flush_workqueue(dev_priv->modeset_wq);
14017 intel_atomic_commit_tail(state);
14018 }
14019
14020 return 0;
14021 }
14022
14023 static const struct drm_crtc_funcs intel_crtc_funcs = {
14024 .gamma_set = drm_atomic_helper_legacy_gamma_set,
14025 .set_config = drm_atomic_helper_set_config,
14026 .destroy = intel_crtc_destroy,
14027 .page_flip = drm_atomic_helper_page_flip,
14028 .atomic_duplicate_state = intel_crtc_duplicate_state,
14029 .atomic_destroy_state = intel_crtc_destroy_state,
14030 .set_crc_source = intel_crtc_set_crc_source,
14031 .verify_crc_source = intel_crtc_verify_crc_source,
14032 .get_crc_sources = intel_crtc_get_crc_sources,
14033 };
14034
14035 struct wait_rps_boost {
14036 struct wait_queue_entry wait;
14037
14038 struct drm_crtc *crtc;
14039 struct i915_request *request;
14040 };
14041
14042 static int do_rps_boost(struct wait_queue_entry *_wait,
14043 unsigned mode, int sync, void *key)
14044 {
14045 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14046 struct i915_request *rq = wait->request;
14047
14048 /*
14049 * If we missed the vblank, but the request is already running it
14050 * is reasonable to assume that it will complete before the next
14051 * vblank without our intervention, so leave RPS alone.
14052 */
14053 if (!i915_request_started(rq))
14054 gen6_rps_boost(rq);
14055 i915_request_put(rq);
14056
14057 drm_crtc_vblank_put(wait->crtc);
14058
14059 list_del(&wait->wait.entry);
14060 kfree(wait);
14061 return 1;
14062 }
14063
14064 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14065 struct dma_fence *fence)
14066 {
14067 struct wait_rps_boost *wait;
14068
14069 if (!dma_fence_is_i915(fence))
14070 return;
14071
14072 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14073 return;
14074
14075 if (drm_crtc_vblank_get(crtc))
14076 return;
14077
14078 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14079 if (!wait) {
14080 drm_crtc_vblank_put(crtc);
14081 return;
14082 }
14083
14084 wait->request = to_request(dma_fence_get(fence));
14085 wait->crtc = crtc;
14086
14087 wait->wait.func = do_rps_boost;
14088 wait->wait.flags = 0;
14089
14090 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14091 }
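/*
 * Illustrative flow (not in the original source): the helper above
 * parks do_rps_boost() on the crtc's vblank waitqueue. On the next
 * vblank the callback runs; if the flip's request still hasn't started
 * executing by then, it calls gen6_rps_boost() to raise the GPU clocks,
 * and in any case drops the request and vblank references it held.
 */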
14092
14093 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14094 {
14095 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14096 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14097 struct drm_framebuffer *fb = plane_state->base.fb;
14098 struct i915_vma *vma;
14099
14100 if (plane->id == PLANE_CURSOR &&
14101 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14102 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14103 const int align = intel_cursor_alignment(dev_priv);
14104 int err;
14105
14106 err = i915_gem_object_attach_phys(obj, align);
14107 if (err)
14108 return err;
14109 }
14110
14111 vma = intel_pin_and_fence_fb_obj(fb,
14112 &plane_state->view,
14113 intel_plane_uses_fence(plane_state),
14114 &plane_state->flags);
14115 if (IS_ERR(vma))
14116 return PTR_ERR(vma);
14117
14118 plane_state->vma = vma;
14119
14120 return 0;
14121 }
14122
14123 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14124 {
14125 struct i915_vma *vma;
14126
14127 vma = fetch_and_zero(&old_plane_state->vma);
14128 if (vma)
14129 intel_unpin_fb_vma(vma, old_plane_state->flags);
14130 }
14131
14132 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14133 {
14134 struct i915_sched_attr attr = {
14135 .priority = I915_PRIORITY_DISPLAY,
14136 };
14137
14138 i915_gem_object_wait_priority(obj, 0, &attr);
14139 }
14140
14141 /**
14142 * intel_prepare_plane_fb - Prepare fb for usage on plane
14143 * @plane: drm plane to prepare for
14144 * @new_state: the plane state being prepared
14145 *
14146 * Prepares a framebuffer for usage on a display plane. Generally this
14147 * involves pinning the underlying object and updating the frontbuffer tracking
14148 * bits. Some older platforms need special physical address handling for
14149 * cursor planes.
14150 *
14151 * Must be called with struct_mutex held.
14152 *
14153 * Returns 0 on success, negative error code on failure.
14154 */
14155 int
14156 intel_prepare_plane_fb(struct drm_plane *plane,
14157 struct drm_plane_state *new_state)
14158 {
14159 struct intel_atomic_state *intel_state =
14160 to_intel_atomic_state(new_state->state);
14161 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14162 struct drm_framebuffer *fb = new_state->fb;
14163 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14164 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14165 int ret;
14166
14167 if (old_obj) {
14168 struct drm_crtc_state *crtc_state =
14169 drm_atomic_get_new_crtc_state(new_state->state,
14170 plane->state->crtc);
14171
14172 /* Big Hammer, we also need to ensure that any pending
14173 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14174 * current scanout is retired before unpinning the old
14175 * framebuffer. Note that we rely on userspace rendering
14176 * into the buffer attached to the pipe they are waiting
14177 * on. If not, userspace generates a GPU hang with IPEHR
14178 * pointing to the MI_WAIT_FOR_EVENT.
14179 *
14180 * This should only fail upon a hung GPU, in which case we
14181 * can safely continue.
14182 */
14183 if (needs_modeset(crtc_state)) {
14184 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14185 old_obj->resv, NULL,
14186 false, 0,
14187 GFP_KERNEL);
14188 if (ret < 0)
14189 return ret;
14190 }
14191 }
14192
14193 if (new_state->fence) { /* explicit fencing */
14194 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14195 new_state->fence,
14196 I915_FENCE_TIMEOUT,
14197 GFP_KERNEL);
14198 if (ret < 0)
14199 return ret;
14200 }
14201
14202 if (!obj)
14203 return 0;
14204
14205 ret = i915_gem_object_pin_pages(obj);
14206 if (ret)
14207 return ret;
14208
14209 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14210 if (ret) {
14211 i915_gem_object_unpin_pages(obj);
14212 return ret;
14213 }
14214
14215 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14216
14217 mutex_unlock(&dev_priv->drm.struct_mutex);
14218 i915_gem_object_unpin_pages(obj);
14219 if (ret)
14220 return ret;
14221
14222 fb_obj_bump_render_priority(obj);
14223 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14224
14225 if (!new_state->fence) { /* implicit fencing */
14226 struct dma_fence *fence;
14227
14228 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14229 obj->resv, NULL,
14230 false, I915_FENCE_TIMEOUT,
14231 GFP_KERNEL);
14232 if (ret < 0)
14233 return ret;
14234
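		/*
		 * The exclusive fence on the reservation object tracks
		 * the most recent write to the buffer; hook it up to the
		 * vblank waitqueue so the GPU clocks can be boosted if
		 * the flip misses its vblank.
		 */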
14235 fence = reservation_object_get_excl_rcu(obj->resv);
14236 if (fence) {
14237 add_rps_boost_after_vblank(new_state->crtc, fence);
14238 dma_fence_put(fence);
14239 }
14240 } else {
14241 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14242 }
14243
14244 /*
14245 * We declare pageflips to be interactive and so merit a small bias
14246 * towards upclocking to deliver the frame on time. By merely changing
14247 * the RPS thresholds (sampling more regularly and aiming for higher
14248 * clocks), we can hopefully handle low power workloads (like kodi)
14249 * that are not quite steady state, without resorting to forcing
14250 * maximum clocks following a vblank miss (see do_rps_boost()).
14251 */
14252 if (!intel_state->rps_interactive) {
14253 intel_rps_mark_interactive(dev_priv, true);
14254 intel_state->rps_interactive = true;
14255 }
14256
14257 return 0;
14258 }
14259
14260 /**
14261 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14262 * @plane: drm plane to clean up for
14263 * @old_state: the state from the previous modeset
14264 *
14265 * Cleans up a framebuffer that has just been removed from a plane.
14266 *
14267 * Must be called with struct_mutex held.
14268 */
14269 void
14270 intel_cleanup_plane_fb(struct drm_plane *plane,
14271 struct drm_plane_state *old_state)
14272 {
14273 struct intel_atomic_state *intel_state =
14274 to_intel_atomic_state(old_state->state);
14275 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14276
14277 if (intel_state->rps_interactive) {
14278 intel_rps_mark_interactive(dev_priv, false);
14279 intel_state->rps_interactive = false;
14280 }
14281
14282 /* Should only be called after a successful intel_prepare_plane_fb()! */
14283 mutex_lock(&dev_priv->drm.struct_mutex);
14284 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14285 mutex_unlock(&dev_priv->drm.struct_mutex);
14286 }
14287
14288 int
14289 skl_max_scale(const struct intel_crtc_state *crtc_state,
14290 u32 pixel_format)
14291 {
14292 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14293 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14294 int max_scale, mult;
14295 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14296
14297 if (!crtc_state->base.enable)
14298 return DRM_PLANE_HELPER_NO_SCALING;
14299
14300 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14301 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14302
14303 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14304 max_dotclk *= 2;
14305
14306 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14307 return DRM_PLANE_HELPER_NO_SCALING;
14308
14309 /*
14310 * The skl max scale is the lower of:
14311 * - just under the format limit in 16.16 fixed point (3x, or 2x for
14312 *   planar YUV; the -1 keeps the value strictly below the limit), or
14313 * - cdclk / crtc_clock.
14314 */
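/*
 * Worked example with hypothetical clocks: max_dotclk = 600000,
 * crtc_clock = 148500, non-planar format (mult = 3):
 *   tmpclk1 = (1 << 16) * 3 - 1              = 196607 (~3.00 in 16.16)
 *   tmpclk2 = 256 * ((600000 << 8) / 148500) = 264704 (~4.04 in 16.16)
 *   max_scale = min(196607, 264704)          = 196607
 * i.e. here the format limit, not the clock ratio, caps the scale.
 */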
14315 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14316 tmpclk1 = (1 << 16) * mult - 1;
14317 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14318 max_scale = min(tmpclk1, tmpclk2);
14319
14320 return max_scale;
14321 }
14322
14323 static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14324 struct intel_crtc *crtc)
14325 {
14326 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14327 struct intel_crtc_state *old_crtc_state =
14328 intel_atomic_get_old_crtc_state(state, crtc);
14329 struct intel_crtc_state *new_crtc_state =
14330 intel_atomic_get_new_crtc_state(state, crtc);
14331 bool modeset = needs_modeset(&new_crtc_state->base);
14332
14333 /* Perform vblank evasion around commit operation */
14334 intel_pipe_update_start(new_crtc_state);
14335
14336 if (modeset)
14337 goto out;
14338
14339 if (new_crtc_state->base.color_mgmt_changed ||
14340 new_crtc_state->update_pipe)
14341 intel_color_commit(new_crtc_state);
14342
14343 if (new_crtc_state->update_pipe)
14344 intel_update_pipe_config(old_crtc_state, new_crtc_state);
14345 else if (INTEL_GEN(dev_priv) >= 9)
14346 skl_detach_scalers(new_crtc_state);
14347
14348 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14349 bdw_set_pipemisc(new_crtc_state);
14350
14351 out:
14352 if (dev_priv->display.atomic_update_watermarks)
14353 dev_priv->display.atomic_update_watermarks(state,
14354 new_crtc_state);
14355 }
14356
14357 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14358 struct intel_crtc_state *crtc_state)
14359 {
14360 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14361
14362 if (!IS_GEN(dev_priv, 2))
14363 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14364
14365 if (crtc_state->has_pch_encoder) {
14366 enum pipe pch_transcoder =
14367 intel_crtc_pch_transcoder(crtc);
14368
14369 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14370 }
14371 }
14372
14373 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14374 struct intel_crtc *crtc)
14375 {
14376 struct intel_crtc_state *old_crtc_state =
14377 intel_atomic_get_old_crtc_state(state, crtc);
14378 struct intel_crtc_state *new_crtc_state =
14379 intel_atomic_get_new_crtc_state(state, crtc);
14380
14381 intel_pipe_update_end(new_crtc_state);
14382
14383 if (new_crtc_state->update_pipe &&
14384 !needs_modeset(&new_crtc_state->base) &&
14385 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14386 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14387 }
14388
14389 /**
14390 * intel_plane_destroy - destroy a plane
14391 * @plane: plane to destroy
14392 *
14393 * Common destruction function for all types of planes (primary, cursor,
14394 * sprite).
14395 */
14396 void intel_plane_destroy(struct drm_plane *plane)
14397 {
14398 drm_plane_cleanup(plane);
14399 kfree(to_intel_plane(plane));
14400 }
14401
14402 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14403 u32 format, u64 modifier)
14404 {
14405 switch (modifier) {
14406 case DRM_FORMAT_MOD_LINEAR:
14407 case I915_FORMAT_MOD_X_TILED:
14408 break;
14409 default:
14410 return false;
14411 }
14412
14413 switch (format) {
14414 case DRM_FORMAT_C8:
14415 case DRM_FORMAT_RGB565:
14416 case DRM_FORMAT_XRGB1555:
14417 case DRM_FORMAT_XRGB8888:
14418 return modifier == DRM_FORMAT_MOD_LINEAR ||
14419 modifier == I915_FORMAT_MOD_X_TILED;
14420 default:
14421 return false;
14422 }
14423 }
14424
14425 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14426 u32 format, u64 modifier)
14427 {
14428 switch (modifier) {
14429 case DRM_FORMAT_MOD_LINEAR:
14430 case I915_FORMAT_MOD_X_TILED:
14431 break;
14432 default:
14433 return false;
14434 }
14435
14436 switch (format) {
14437 case DRM_FORMAT_C8:
14438 case DRM_FORMAT_RGB565:
14439 case DRM_FORMAT_XRGB8888:
14440 case DRM_FORMAT_XBGR8888:
14441 case DRM_FORMAT_XRGB2101010:
14442 case DRM_FORMAT_XBGR2101010:
14443 return modifier == DRM_FORMAT_MOD_LINEAR ||
14444 modifier == I915_FORMAT_MOD_X_TILED;
14445 default:
14446 return false;
14447 }
14448 }
14449
14450 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14451 u32 format, u64 modifier)
14452 {
14453 return modifier == DRM_FORMAT_MOD_LINEAR &&
14454 format == DRM_FORMAT_ARGB8888;
14455 }
14456
14457 static const struct drm_plane_funcs i965_plane_funcs = {
14458 .update_plane = drm_atomic_helper_update_plane,
14459 .disable_plane = drm_atomic_helper_disable_plane,
14460 .destroy = intel_plane_destroy,
14461 .atomic_get_property = intel_plane_atomic_get_property,
14462 .atomic_set_property = intel_plane_atomic_set_property,
14463 .atomic_duplicate_state = intel_plane_duplicate_state,
14464 .atomic_destroy_state = intel_plane_destroy_state,
14465 .format_mod_supported = i965_plane_format_mod_supported,
14466 };
14467
14468 static const struct drm_plane_funcs i8xx_plane_funcs = {
14469 .update_plane = drm_atomic_helper_update_plane,
14470 .disable_plane = drm_atomic_helper_disable_plane,
14471 .destroy = intel_plane_destroy,
14472 .atomic_get_property = intel_plane_atomic_get_property,
14473 .atomic_set_property = intel_plane_atomic_set_property,
14474 .atomic_duplicate_state = intel_plane_duplicate_state,
14475 .atomic_destroy_state = intel_plane_destroy_state,
14476 .format_mod_supported = i8xx_plane_format_mod_supported,
14477 };
14478
14479 static int
14480 intel_legacy_cursor_update(struct drm_plane *plane,
14481 struct drm_crtc *crtc,
14482 struct drm_framebuffer *fb,
14483 int crtc_x, int crtc_y,
14484 unsigned int crtc_w, unsigned int crtc_h,
14485 u32 src_x, u32 src_y,
14486 u32 src_w, u32 src_h,
14487 struct drm_modeset_acquire_ctx *ctx)
14488 {
14489 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14490 int ret;
14491 struct drm_plane_state *old_plane_state, *new_plane_state;
14492 struct intel_plane *intel_plane = to_intel_plane(plane);
14493 struct drm_framebuffer *old_fb;
14494 struct intel_crtc_state *crtc_state =
14495 to_intel_crtc_state(crtc->state);
14496 struct intel_crtc_state *new_crtc_state;
14497
14498 /*
14499 * When the crtc is inactive or there is a modeset pending,
14500 * take the slowpath and wait for it to complete.
14501 */
14502 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14503 crtc_state->update_pipe)
14504 goto slow;
14505
14506 old_plane_state = plane->state;
14507 /*
14508 * Don't do an async update if there is an outstanding commit modifying
14509 * the plane. This prevents our async update's changes from getting
14510 * overridden by a previous synchronous update's state.
14511 */
14512 if (old_plane_state->commit &&
14513 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14514 goto slow;
14515
14516 /*
14517 * If any parameters change that may affect watermarks,
14518 * take the slowpath. Only changing fb or position should be
14519 * in the fastpath.
14520 */
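	/*
	 * Note that "!old_plane_state->fb != !fb" below compares only the
	 * NULL-ness of the two pointers: "!" booleanizes each side, so the
	 * inequality is true exactly when one fb is present and the other
	 * is not, i.e. the plane is being switched on or off.
	 */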
14521 if (old_plane_state->crtc != crtc ||
14522 old_plane_state->src_w != src_w ||
14523 old_plane_state->src_h != src_h ||
14524 old_plane_state->crtc_w != crtc_w ||
14525 old_plane_state->crtc_h != crtc_h ||
14526 !old_plane_state->fb != !fb)
14527 goto slow;
14528
14529 new_plane_state = intel_plane_duplicate_state(plane);
14530 if (!new_plane_state)
14531 return -ENOMEM;
14532
14533 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14534 if (!new_crtc_state) {
14535 ret = -ENOMEM;
14536 goto out_free;
14537 }
14538
14539 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14540
14541 new_plane_state->src_x = src_x;
14542 new_plane_state->src_y = src_y;
14543 new_plane_state->src_w = src_w;
14544 new_plane_state->src_h = src_h;
14545 new_plane_state->crtc_x = crtc_x;
14546 new_plane_state->crtc_y = crtc_y;
14547 new_plane_state->crtc_w = crtc_w;
14548 new_plane_state->crtc_h = crtc_h;
14549
14550 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14551 to_intel_plane_state(old_plane_state),
14552 to_intel_plane_state(new_plane_state));
14553 if (ret)
14554 goto out_free;
14555
14556 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14557 if (ret)
14558 goto out_free;
14559
14560 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14561 if (ret)
14562 goto out_unlock;
14563
14564 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14565
14566 old_fb = old_plane_state->fb;
14567 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14568 intel_plane->frontbuffer_bit);
14569
14570 /* Swap plane state */
14571 plane->state = new_plane_state;
14572
14573 /*
14574 * We cannot swap crtc_state as it may be in use by an atomic commit or
14575 * page flip that's running simultaneously. If we swap crtc_state and
14576 * destroy the old state, we will cause a use-after-free there.
14577 *
14578 * Only update active_planes, which is needed for our internal
14579 * bookkeeping. Either value will do the right thing when updating
14580 * planes atomically. If the cursor was part of the atomic update then
14581 * we would have taken the slowpath.
14582 */
14583 crtc_state->active_planes = new_crtc_state->active_planes;
14584
14585 if (plane->state->visible)
14586 intel_update_plane(intel_plane, crtc_state,
14587 to_intel_plane_state(plane->state));
14588 else
14589 intel_disable_plane(intel_plane, crtc_state);
14590
14591 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14592
14593 out_unlock:
14594 mutex_unlock(&dev_priv->drm.struct_mutex);
14595 out_free:
14596 if (new_crtc_state)
14597 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14598 if (ret)
14599 intel_plane_destroy_state(plane, new_plane_state);
14600 else
14601 intel_plane_destroy_state(plane, old_plane_state);
14602 return ret;
14603
14604 slow:
14605 return drm_atomic_helper_update_plane(plane, crtc, fb,
14606 crtc_x, crtc_y, crtc_w, crtc_h,
14607 src_x, src_y, src_w, src_h, ctx);
14608 }
14609
14610 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14611 .update_plane = intel_legacy_cursor_update,
14612 .disable_plane = drm_atomic_helper_disable_plane,
14613 .destroy = intel_plane_destroy,
14614 .atomic_get_property = intel_plane_atomic_get_property,
14615 .atomic_set_property = intel_plane_atomic_set_property,
14616 .atomic_duplicate_state = intel_plane_duplicate_state,
14617 .atomic_destroy_state = intel_plane_destroy_state,
14618 .format_mod_supported = intel_cursor_format_mod_supported,
14619 };
14620
14621 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14622 enum i9xx_plane_id i9xx_plane)
14623 {
14624 if (!HAS_FBC(dev_priv))
14625 return false;
14626
14627 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14628 return i9xx_plane == PLANE_A; /* tied to pipe A */
14629 else if (IS_IVYBRIDGE(dev_priv))
14630 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14631 i9xx_plane == PLANE_C;
14632 else if (INTEL_GEN(dev_priv) >= 4)
14633 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14634 else
14635 return i9xx_plane == PLANE_A;
14636 }
14637
14638 static struct intel_plane *
14639 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14640 {
14641 struct intel_plane *plane;
14642 const struct drm_plane_funcs *plane_funcs;
14643 unsigned int supported_rotations;
14644 unsigned int possible_crtcs;
14645 const u64 *modifiers;
14646 const u32 *formats;
14647 int num_formats;
14648 int ret;
14649
14650 if (INTEL_GEN(dev_priv) >= 9)
14651 return skl_universal_plane_create(dev_priv, pipe,
14652 PLANE_PRIMARY);
14653
14654 plane = intel_plane_alloc();
14655 if (IS_ERR(plane))
14656 return plane;
14657
14658 plane->pipe = pipe;
14659 /*
14660 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14661 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14662 */
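	/*
	 * The "!pipe" below swaps the pairing: pipe A (0) is fed by
	 * plane B (1) and pipe B (1) by plane A (0), putting the
	 * FBC-capable plane A on the LVDS pipe.
	 */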
14663 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14664 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14665 else
14666 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14667 plane->id = PLANE_PRIMARY;
14668 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14669
14670 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14671 if (plane->has_fbc) {
14672 struct intel_fbc *fbc = &dev_priv->fbc;
14673
14674 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14675 }
14676
14677 if (INTEL_GEN(dev_priv) >= 4) {
14678 formats = i965_primary_formats;
14679 num_formats = ARRAY_SIZE(i965_primary_formats);
14680 modifiers = i9xx_format_modifiers;
14681
14682 plane->max_stride = i9xx_plane_max_stride;
14683 plane->update_plane = i9xx_update_plane;
14684 plane->disable_plane = i9xx_disable_plane;
14685 plane->get_hw_state = i9xx_plane_get_hw_state;
14686 plane->check_plane = i9xx_plane_check;
14687
14688 plane_funcs = &i965_plane_funcs;
14689 } else {
14690 formats = i8xx_primary_formats;
14691 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14692 modifiers = i9xx_format_modifiers;
14693
14694 plane->max_stride = i9xx_plane_max_stride;
14695 plane->update_plane = i9xx_update_plane;
14696 plane->disable_plane = i9xx_disable_plane;
14697 plane->get_hw_state = i9xx_plane_get_hw_state;
14698 plane->check_plane = i9xx_plane_check;
14699
14700 plane_funcs = &i8xx_plane_funcs;
14701 }
14702
14703 possible_crtcs = BIT(pipe);
14704
14705 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14706 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14707 possible_crtcs, plane_funcs,
14708 formats, num_formats, modifiers,
14709 DRM_PLANE_TYPE_PRIMARY,
14710 "primary %c", pipe_name(pipe));
14711 else
14712 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14713 possible_crtcs, plane_funcs,
14714 formats, num_formats, modifiers,
14715 DRM_PLANE_TYPE_PRIMARY,
14716 "plane %c",
14717 plane_name(plane->i9xx_plane));
14718 if (ret)
14719 goto fail;
14720
14721 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14722 supported_rotations =
14723 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14724 DRM_MODE_REFLECT_X;
14725 } else if (INTEL_GEN(dev_priv) >= 4) {
14726 supported_rotations =
14727 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14728 } else {
14729 supported_rotations = DRM_MODE_ROTATE_0;
14730 }
14731
14732 if (INTEL_GEN(dev_priv) >= 4)
14733 drm_plane_create_rotation_property(&plane->base,
14734 DRM_MODE_ROTATE_0,
14735 supported_rotations);
14736
14737 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14738
14739 return plane;
14740
14741 fail:
14742 intel_plane_free(plane);
14743
14744 return ERR_PTR(ret);
14745 }
14746
14747 static struct intel_plane *
14748 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14749 enum pipe pipe)
14750 {
14751 unsigned int possible_crtcs;
14752 struct intel_plane *cursor;
14753 int ret;
14754
14755 cursor = intel_plane_alloc();
14756 if (IS_ERR(cursor))
14757 return cursor;
14758
14759 cursor->pipe = pipe;
14760 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14761 cursor->id = PLANE_CURSOR;
14762 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14763
14764 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14765 cursor->max_stride = i845_cursor_max_stride;
14766 cursor->update_plane = i845_update_cursor;
14767 cursor->disable_plane = i845_disable_cursor;
14768 cursor->get_hw_state = i845_cursor_get_hw_state;
14769 cursor->check_plane = i845_check_cursor;
14770 } else {
14771 cursor->max_stride = i9xx_cursor_max_stride;
14772 cursor->update_plane = i9xx_update_cursor;
14773 cursor->disable_plane = i9xx_disable_cursor;
14774 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14775 cursor->check_plane = i9xx_check_cursor;
14776 }
14777
14778 cursor->cursor.base = ~0;
14779 cursor->cursor.cntl = ~0;
14780
14781 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14782 cursor->cursor.size = ~0;
14783
14784 possible_crtcs = BIT(pipe);
14785
14786 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14787 possible_crtcs, &intel_cursor_plane_funcs,
14788 intel_cursor_formats,
14789 ARRAY_SIZE(intel_cursor_formats),
14790 cursor_format_modifiers,
14791 DRM_PLANE_TYPE_CURSOR,
14792 "cursor %c", pipe_name(pipe));
14793 if (ret)
14794 goto fail;
14795
14796 if (INTEL_GEN(dev_priv) >= 4)
14797 drm_plane_create_rotation_property(&cursor->base,
14798 DRM_MODE_ROTATE_0,
14799 DRM_MODE_ROTATE_0 |
14800 DRM_MODE_ROTATE_180);
14801
14802 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14803
14804 return cursor;
14805
14806 fail:
14807 intel_plane_free(cursor);
14808
14809 return ERR_PTR(ret);
14810 }
14811
14812 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14813 struct intel_crtc_state *crtc_state)
14814 {
14815 struct intel_crtc_scaler_state *scaler_state =
14816 &crtc_state->scaler_state;
14817 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14818 int i;
14819
14820 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14821 if (!crtc->num_scalers)
14822 return;
14823
14824 for (i = 0; i < crtc->num_scalers; i++) {
14825 struct intel_scaler *scaler = &scaler_state->scalers[i];
14826
14827 scaler->in_use = 0;
14828 scaler->mode = 0;
14829 }
14830
14831 scaler_state->scaler_id = -1;
14832 }
14833
14834 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14835 {
14836 struct intel_crtc *intel_crtc;
14837 struct intel_crtc_state *crtc_state = NULL;
14838 struct intel_plane *primary = NULL;
14839 struct intel_plane *cursor = NULL;
14840 int sprite, ret;
14841
14842 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14843 if (!intel_crtc)
14844 return -ENOMEM;
14845
14846 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14847 if (!crtc_state) {
14848 ret = -ENOMEM;
14849 goto fail;
14850 }
14851 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
14852 intel_crtc->config = crtc_state;
14853
14854 primary = intel_primary_plane_create(dev_priv, pipe);
14855 if (IS_ERR(primary)) {
14856 ret = PTR_ERR(primary);
14857 goto fail;
14858 }
14859 intel_crtc->plane_ids_mask |= BIT(primary->id);
14860
14861 for_each_sprite(dev_priv, pipe, sprite) {
14862 struct intel_plane *plane;
14863
14864 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14865 if (IS_ERR(plane)) {
14866 ret = PTR_ERR(plane);
14867 goto fail;
14868 }
14869 intel_crtc->plane_ids_mask |= BIT(plane->id);
14870 }
14871
14872 cursor = intel_cursor_plane_create(dev_priv, pipe);
14873 if (IS_ERR(cursor)) {
14874 ret = PTR_ERR(cursor);
14875 goto fail;
14876 }
14877 intel_crtc->plane_ids_mask |= BIT(cursor->id);
14878
14879 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14880 &primary->base, &cursor->base,
14881 &intel_crtc_funcs,
14882 "pipe %c", pipe_name(pipe));
14883 if (ret)
14884 goto fail;
14885
14886 intel_crtc->pipe = pipe;
14887
14888 /* initialize shared scalers */
14889 intel_crtc_init_scalers(intel_crtc, crtc_state);
14890
14891 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14892 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14893 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14894
14895 if (INTEL_GEN(dev_priv) < 9) {
14896 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14897
14898 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14899 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14900 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14901 }
14902
14903 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14904
14905 intel_color_init(intel_crtc);
14906
14907 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14908
14909 return 0;
14910
14911 fail:
14912 /*
14913 * drm_mode_config_cleanup() will free up any
14914 * crtcs/planes already initialized.
14915 */
14916 kfree(crtc_state);
14917 kfree(intel_crtc);
14918
14919 return ret;
14920 }
14921
14922 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14923 struct drm_file *file)
14924 {
14925 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14926 struct drm_crtc *drmmode_crtc;
14927 struct intel_crtc *crtc;
14928
14929 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14930 if (!drmmode_crtc)
14931 return -ENOENT;
14932
14933 crtc = to_intel_crtc(drmmode_crtc);
14934 pipe_from_crtc_id->pipe = crtc->pipe;
14935
14936 return 0;
14937 }
14938
14939 static int intel_encoder_clones(struct intel_encoder *encoder)
14940 {
14941 struct drm_device *dev = encoder->base.dev;
14942 struct intel_encoder *source_encoder;
14943 int index_mask = 0;
14944 int entry = 0;
14945
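	/*
	 * Build a bitmask of the positions, in the global encoder list,
	 * of every encoder we can be cloned with. E.g. if only the
	 * encoders at positions 0 and 2 are cloneable with us,
	 * index_mask ends up as 0b101.
	 */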
14946 for_each_intel_encoder(dev, source_encoder) {
14947 if (encoders_cloneable(encoder, source_encoder))
14948 index_mask |= (1 << entry);
14949
14950 entry++;
14951 }
14952
14953 return index_mask;
14954 }
14955
14956 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14957 {
14958 if (!IS_MOBILE(dev_priv))
14959 return false;
14960
14961 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14962 return false;
14963
14964 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14965 return false;
14966
14967 return true;
14968 }
14969
14970 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14971 {
14972 if (INTEL_GEN(dev_priv) >= 9)
14973 return false;
14974
14975 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14976 return false;
14977
14978 if (HAS_PCH_LPT_H(dev_priv) &&
14979 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14980 return false;
14981
14982 /* DDI E can't be used if DDI A requires 4 lanes */
14983 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14984 return false;
14985
14986 if (!dev_priv->vbt.int_crt_support)
14987 return false;
14988
14989 return true;
14990 }
14991
14992 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14993 {
14994 int pps_num;
14995 int pps_idx;
14996
14997 if (HAS_DDI(dev_priv))
14998 return;
14999 /*
15000 * This w/a is needed at least on CPT/PPT, but to be safe apply it
15001 * everywhere registers can be write-protected.
15002 */
15003 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15004 pps_num = 2;
15005 else
15006 pps_num = 1;
15007
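	/*
	 * Classic read-modify-write: clear the lock-key field and
	 * program the unlock value so that subsequent PPS register
	 * writes take effect.
	 */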
15008 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15009 u32 val = I915_READ(PP_CONTROL(pps_idx));
15010
15011 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15012 I915_WRITE(PP_CONTROL(pps_idx), val);
15013 }
15014 }
15015
15016 static void intel_pps_init(struct drm_i915_private *dev_priv)
15017 {
15018 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15019 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15020 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15021 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15022 else
15023 dev_priv->pps_mmio_base = PPS_BASE;
15024
15025 intel_pps_unlock_regs_wa(dev_priv);
15026 }
15027
15028 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
15029 {
15030 struct intel_encoder *encoder;
15031 bool dpd_is_edp = false;
15032
15033 intel_pps_init(dev_priv);
15034
15035 if (!HAS_DISPLAY(dev_priv))
15036 return;
15037
15038 if (IS_ELKHARTLAKE(dev_priv)) {
15039 intel_ddi_init(dev_priv, PORT_A);
15040 intel_ddi_init(dev_priv, PORT_B);
15041 intel_ddi_init(dev_priv, PORT_C);
15042 icl_dsi_init(dev_priv);
15043 } else if (INTEL_GEN(dev_priv) >= 11) {
15044 intel_ddi_init(dev_priv, PORT_A);
15045 intel_ddi_init(dev_priv, PORT_B);
15046 intel_ddi_init(dev_priv, PORT_C);
15047 intel_ddi_init(dev_priv, PORT_D);
15048 intel_ddi_init(dev_priv, PORT_E);
15049 /*
15050 * On some ICL SKUs port F is not present. No strap bits for
15051 * this, so rely on VBT.
15052 * Work around broken VBTs on SKUs known to have no port F.
15053 */
15054 if (IS_ICL_WITH_PORT_F(dev_priv) &&
15055 intel_bios_is_port_present(dev_priv, PORT_F))
15056 intel_ddi_init(dev_priv, PORT_F);
15057
15058 icl_dsi_init(dev_priv);
15059 } else if (IS_GEN9_LP(dev_priv)) {
15060 /*
15061 * FIXME: Broxton doesn't support port detection via the
15062 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
15063 * detect the ports.
15064 */
15065 intel_ddi_init(dev_priv, PORT_A);
15066 intel_ddi_init(dev_priv, PORT_B);
15067 intel_ddi_init(dev_priv, PORT_C);
15068
15069 vlv_dsi_init(dev_priv);
15070 } else if (HAS_DDI(dev_priv)) {
15071 int found;
15072
15073 if (intel_ddi_crt_present(dev_priv))
15074 intel_crt_init(dev_priv);
15075
15076 /*
15077 * Haswell uses DDI functions to detect digital outputs.
15078 * On SKL pre-D0 the strap isn't connected, so we assume
15079 * it's there.
15080 */
15081 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
15082 /* WaIgnoreDDIAStrap: skl */
15083 if (found || IS_GEN9_BC(dev_priv))
15084 intel_ddi_init(dev_priv, PORT_A);
15085
15086 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
15087 * register */
15088 found = I915_READ(SFUSE_STRAP);
15089
15090 if (found & SFUSE_STRAP_DDIB_DETECTED)
15091 intel_ddi_init(dev_priv, PORT_B);
15092 if (found & SFUSE_STRAP_DDIC_DETECTED)
15093 intel_ddi_init(dev_priv, PORT_C);
15094 if (found & SFUSE_STRAP_DDID_DETECTED)
15095 intel_ddi_init(dev_priv, PORT_D);
15096 if (found & SFUSE_STRAP_DDIF_DETECTED)
15097 intel_ddi_init(dev_priv, PORT_F);
15098 /*
15099 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
15100 */
15101 if (IS_GEN9_BC(dev_priv) &&
15102 intel_bios_is_port_present(dev_priv, PORT_E))
15103 intel_ddi_init(dev_priv, PORT_E);
15104
15105 } else if (HAS_PCH_SPLIT(dev_priv)) {
15106 int found;
15107
15108 /*
15109 * intel_edp_init_connector() depends on this completing first,
15110 * to prevent the registration of both eDP and LVDS and the
15111 * incorrect sharing of the PPS.
15112 */
15113 intel_lvds_init(dev_priv);
15114 intel_crt_init(dev_priv);
15115
15116 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
15117
15118 if (ilk_has_edp_a(dev_priv))
15119 intel_dp_init(dev_priv, DP_A, PORT_A);
15120
15121 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
15122 /* PCH SDVOB multiplex with HDMIB */
15123 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
15124 if (!found)
15125 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
15126 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
15127 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
15128 }
15129
15130 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
15131 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
15132
15133 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
15134 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
15135
15136 if (I915_READ(PCH_DP_C) & DP_DETECTED)
15137 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
15138
15139 if (I915_READ(PCH_DP_D) & DP_DETECTED)
15140 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
15141 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15142 bool has_edp, has_port;
15143
15144 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
15145 intel_crt_init(dev_priv);
15146
15147 /*
15148 * The DP_DETECTED bit is the latched state of the DDC
15149 * SDA pin at boot. However since eDP doesn't require DDC
15150 * (no way to plug in a DP->HDMI dongle) the DDC pins for
15151 * eDP ports may have been muxed to an alternate function.
15152 * Thus we can't rely on the DP_DETECTED bit alone to detect
15153 * eDP ports. Consult the VBT as well as DP_DETECTED to
15154 * detect eDP ports.
15155 *
15156 * Sadly the straps seem to be missing sometimes even for HDMI
15157 * ports (e.g. on the Voyo V3 - CHT x7-Z8700), so check both the
15158 * strap and the VBT for the presence of the port. Additionally we
15159 * can't trust the port type the VBT declares, as we've seen at
15160 * least HDMI ports that the VBT claims are DP or eDP.
15161 */
15162 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
15163 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
15164 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
15165 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
15166 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
15167 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
15168
15169 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
15170 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
15171 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
15172 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
15173 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
15174 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
15175
15176 if (IS_CHERRYVIEW(dev_priv)) {
15177 /*
15178 * eDP not supported on port D,
15179 * so no need to worry about it
15180 */
15181 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
15182 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
15183 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
15184 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
15185 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
15186 }
15187
15188 vlv_dsi_init(dev_priv);
15189 } else if (IS_PINEVIEW(dev_priv)) {
15190 intel_lvds_init(dev_priv);
15191 intel_crt_init(dev_priv);
15192 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
15193 bool found = false;
15194
15195 if (IS_MOBILE(dev_priv))
15196 intel_lvds_init(dev_priv);
15197
15198 intel_crt_init(dev_priv);
15199
15200 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15201 DRM_DEBUG_KMS("probing SDVOB\n");
15202 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
15203 if (!found && IS_G4X(dev_priv)) {
15204 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
15205 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
15206 }
15207
15208 if (!found && IS_G4X(dev_priv))
15209 intel_dp_init(dev_priv, DP_B, PORT_B);
15210 }
15211
15212 /* Before G4X, SDVOC doesn't have its own detect register */
15213
15214 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15215 DRM_DEBUG_KMS("probing SDVOC\n");
15216 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
15217 }
15218
15219 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
15221 if (IS_G4X(dev_priv)) {
15222 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
15223 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
15224 }
15225 if (IS_G4X(dev_priv))
15226 intel_dp_init(dev_priv, DP_C, PORT_C);
15227 }
15228
15229 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
15230 intel_dp_init(dev_priv, DP_D, PORT_D);
15231
15232 if (SUPPORTS_TV(dev_priv))
15233 intel_tv_init(dev_priv);
15234 } else if (IS_GEN(dev_priv, 2)) {
15235 if (IS_I85X(dev_priv))
15236 intel_lvds_init(dev_priv);
15237
15238 intel_crt_init(dev_priv);
15239 intel_dvo_init(dev_priv);
15240 }
15241
15242 intel_psr_init(dev_priv);
15243
15244 for_each_intel_encoder(&dev_priv->drm, encoder) {
15245 encoder->base.possible_crtcs = encoder->crtc_mask;
15246 encoder->base.possible_clones =
15247 intel_encoder_clones(encoder);
15248 }
15249
15250 intel_init_pch_refclk(dev_priv);
15251
15252 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
15253 }
15254
15255 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15256 {
15257 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15258 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15259
15260 drm_framebuffer_cleanup(fb);
15261
15262 i915_gem_object_lock(obj);
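	/*
	 * Post-decrement: the WARN fires if framebuffer_references was
	 * already zero before this destroy, i.e. on an unbalanced
	 * reference count.
	 */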
15263 WARN_ON(!obj->framebuffer_references--);
15264 i915_gem_object_unlock(obj);
15265
15266 i915_gem_object_put(obj);
15267
15268 kfree(intel_fb);
15269 }
15270
15271 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15272 struct drm_file *file,
15273 unsigned int *handle)
15274 {
15275 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15276
15277 if (obj->userptr.mm) {
15278 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15279 return -EINVAL;
15280 }
15281
15282 return drm_gem_handle_create(file, &obj->base, handle);
15283 }
15284
15285 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15286 struct drm_file *file,
15287 unsigned flags, unsigned color,
15288 struct drm_clip_rect *clips,
15289 unsigned num_clips)
15290 {
15291 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15292
15293 i915_gem_object_flush_if_display(obj);
15294 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15295
15296 return 0;
15297 }
15298
15299 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15300 .destroy = intel_user_framebuffer_destroy,
15301 .create_handle = intel_user_framebuffer_create_handle,
15302 .dirty = intel_user_framebuffer_dirty,
15303 };
15304
15305 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15306 struct drm_i915_gem_object *obj,
15307 struct drm_mode_fb_cmd2 *mode_cmd)
15308 {
15309 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15310 struct drm_framebuffer *fb = &intel_fb->base;
15311 u32 max_stride;
15312 unsigned int tiling, stride;
15313 int ret = -EINVAL;
15314 int i;
15315
15316 i915_gem_object_lock(obj);
15317 obj->framebuffer_references++;
15318 tiling = i915_gem_object_get_tiling(obj);
15319 stride = i915_gem_object_get_stride(obj);
15320 i915_gem_object_unlock(obj);
15321
15322 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15323 /*
15324 * If there's a fence, enforce that
15325 * the fb modifier and tiling mode match.
15326 */
15327 if (tiling != I915_TILING_NONE &&
15328 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15329 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15330 goto err;
15331 }
15332 } else {
15333 if (tiling == I915_TILING_X) {
15334 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15335 } else if (tiling == I915_TILING_Y) {
15336 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15337 goto err;
15338 }
15339 }
15340
15341 if (!drm_any_plane_has_format(&dev_priv->drm,
15342 mode_cmd->pixel_format,
15343 mode_cmd->modifier[0])) {
15344 struct drm_format_name_buf format_name;
15345
15346 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15347 drm_get_format_name(mode_cmd->pixel_format,
15348 &format_name),
15349 mode_cmd->modifier[0]);
15350 goto err;
15351 }
15352
15353 /*
15354 * gen2/3 display engine uses the fence if present,
15355 * so the tiling mode must match the fb modifier exactly.
15356 */
15357 if (INTEL_GEN(dev_priv) < 4 &&
15358 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15359 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15360 goto err;
15361 }
15362
15363 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
15364 mode_cmd->modifier[0]);
15365 if (mode_cmd->pitches[0] > max_stride) {
15366 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15367 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15368 "tiled" : "linear",
15369 mode_cmd->pitches[0], max_stride);
15370 goto err;
15371 }
15372
15373 /*
15374 * If there's a fence, enforce that
15375 * the fb pitch and fence stride match.
15376 */
15377 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15378 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15379 mode_cmd->pitches[0], stride);
15380 goto err;
15381 }
15382
15383 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15384 if (mode_cmd->offsets[0] != 0)
15385 goto err;
15386
15387 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15388
15389 for (i = 0; i < fb->format->num_planes; i++) {
15390 u32 stride_alignment;
15391
15392 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15393 DRM_DEBUG_KMS("bad plane %d handle\n", i);
15394 goto err;
15395 }
15396
15397 stride_alignment = intel_fb_stride_alignment(fb, i);
15398
15399 /*
15400 * Display WA #0531: skl,bxt,kbl,glk
15401 *
15402 * Render decompression and plane width > 3840
15403 * combined with horizontal panning requires the
15404 * plane stride to be a multiple of 4. We'll just
15405 * require the entire fb to accommodate that to avoid
15406 * potential runtime errors at plane configuration time.
15407 */
15408 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15409 is_ccs_modifier(fb->modifier))
15410 stride_alignment *= 4;
15411
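		/*
		 * stride_alignment is always a power of two here, so
		 * "pitch & (alignment - 1)" below is a division-free way
		 * of testing "pitch % alignment != 0": alignment - 1
		 * masks exactly the bits that must be zero in an aligned
		 * pitch.
		 */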
15412 if (fb->pitches[i] & (stride_alignment - 1)) {
15413 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15414 i, fb->pitches[i], stride_alignment);
15415 goto err;
15416 }
15417
15418 fb->obj[i] = &obj->base;
15419 }
15420
15421 ret = intel_fill_fb_info(dev_priv, fb);
15422 if (ret)
15423 goto err;
15424
15425 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15426 if (ret) {
15427 DRM_ERROR("framebuffer init failed %d\n", ret);
15428 goto err;
15429 }
15430
15431 return 0;
15432
15433 err:
15434 i915_gem_object_lock(obj);
15435 obj->framebuffer_references--;
15436 i915_gem_object_unlock(obj);
15437 return ret;
15438 }
15439
15440 static struct drm_framebuffer *
15441 intel_user_framebuffer_create(struct drm_device *dev,
15442 struct drm_file *filp,
15443 const struct drm_mode_fb_cmd2 *user_mode_cmd)
15444 {
15445 struct drm_framebuffer *fb;
15446 struct drm_i915_gem_object *obj;
15447 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15448
15449 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15450 if (!obj)
15451 return ERR_PTR(-ENOENT);
15452
15453 fb = intel_framebuffer_create(obj, &mode_cmd);
15454 if (IS_ERR(fb))
15455 i915_gem_object_put(obj);
15456
15457 return fb;
15458 }
15459
15460 static void intel_atomic_state_free(struct drm_atomic_state *state)
15461 {
15462 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15463
15464 drm_atomic_state_default_release(state);
15465
15466 i915_sw_fence_fini(&intel_state->commit_ready);
15467
15468 kfree(state);
15469 }
15470
15471 static enum drm_mode_status
15472 intel_mode_valid(struct drm_device *dev,
15473 const struct drm_display_mode *mode)
15474 {
15475 struct drm_i915_private *dev_priv = to_i915(dev);
15476 int hdisplay_max, htotal_max;
15477 int vdisplay_max, vtotal_max;
15478
15479 /*
15480 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15481 * of DBLSCAN modes to the output's mode list when they detect
15482 * the scaling mode property on the connector. And they don't
15483 * ask the kernel to validate those modes in any way until
15484 * modeset time, at which point the client gets a protocol error.
15485 * So in order to not upset those clients we silently ignore the
15486 * DBLSCAN flag on such connectors. For other connectors we will
15487 * reject modes with the DBLSCAN flag in encoder->compute_config().
15488 * And we always reject DBLSCAN modes in connector->mode_valid()
15489 * as we never want such modes on the connector's mode list.
15490 */
15491
15492 if (mode->vscan > 1)
15493 return MODE_NO_VSCAN;
15494
15495 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15496 return MODE_H_ILLEGAL;
15497
15498 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15499 DRM_MODE_FLAG_NCSYNC |
15500 DRM_MODE_FLAG_PCSYNC))
15501 return MODE_HSYNC;
15502
15503 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15504 DRM_MODE_FLAG_PIXMUX |
15505 DRM_MODE_FLAG_CLKDIV2))
15506 return MODE_BAD;
15507
15508 if (INTEL_GEN(dev_priv) >= 9 ||
15509 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15510 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15511 vdisplay_max = 4096;
15512 htotal_max = 8192;
15513 vtotal_max = 8192;
15514 } else if (INTEL_GEN(dev_priv) >= 3) {
15515 hdisplay_max = 4096;
15516 vdisplay_max = 4096;
15517 htotal_max = 8192;
15518 vtotal_max = 8192;
15519 } else {
15520 hdisplay_max = 2048;
15521 vdisplay_max = 2048;
15522 htotal_max = 4096;
15523 vtotal_max = 4096;
15524 }
15525
15526 if (mode->hdisplay > hdisplay_max ||
15527 mode->hsync_start > htotal_max ||
15528 mode->hsync_end > htotal_max ||
15529 mode->htotal > htotal_max)
15530 return MODE_H_ILLEGAL;
15531
15532 if (mode->vdisplay > vdisplay_max ||
15533 mode->vsync_start > vtotal_max ||
15534 mode->vsync_end > vtotal_max ||
15535 mode->vtotal > vtotal_max)
15536 return MODE_V_ILLEGAL;
15537
15538 return MODE_OK;
15539 }
15540
15541 static const struct drm_mode_config_funcs intel_mode_funcs = {
15542 .fb_create = intel_user_framebuffer_create,
15543 .get_format_info = intel_get_format_info,
15544 .output_poll_changed = intel_fbdev_output_poll_changed,
15545 .mode_valid = intel_mode_valid,
15546 .atomic_check = intel_atomic_check,
15547 .atomic_commit = intel_atomic_commit,
15548 .atomic_state_alloc = intel_atomic_state_alloc,
15549 .atomic_state_clear = intel_atomic_state_clear,
15550 .atomic_state_free = intel_atomic_state_free,
15551 };
15552
15553 /**
15554 * intel_init_display_hooks - initialize the display modesetting hooks
15555 * @dev_priv: device private
15556 */
15557 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15558 {
15559 intel_init_cdclk_hooks(dev_priv);
15560
15561 if (INTEL_GEN(dev_priv) >= 9) {
15562 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15563 dev_priv->display.get_initial_plane_config =
15564 skylake_get_initial_plane_config;
15565 dev_priv->display.crtc_compute_clock =
15566 haswell_crtc_compute_clock;
15567 dev_priv->display.crtc_enable = haswell_crtc_enable;
15568 dev_priv->display.crtc_disable = haswell_crtc_disable;
15569 } else if (HAS_DDI(dev_priv)) {
15570 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15571 dev_priv->display.get_initial_plane_config =
15572 i9xx_get_initial_plane_config;
15573 dev_priv->display.crtc_compute_clock =
15574 haswell_crtc_compute_clock;
15575 dev_priv->display.crtc_enable = haswell_crtc_enable;
15576 dev_priv->display.crtc_disable = haswell_crtc_disable;
15577 } else if (HAS_PCH_SPLIT(dev_priv)) {
15578 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15579 dev_priv->display.get_initial_plane_config =
15580 i9xx_get_initial_plane_config;
15581 dev_priv->display.crtc_compute_clock =
15582 ironlake_crtc_compute_clock;
15583 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15584 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15585 } else if (IS_CHERRYVIEW(dev_priv)) {
15586 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15587 dev_priv->display.get_initial_plane_config =
15588 i9xx_get_initial_plane_config;
15589 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15590 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15591 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15592 } else if (IS_VALLEYVIEW(dev_priv)) {
15593 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15594 dev_priv->display.get_initial_plane_config =
15595 i9xx_get_initial_plane_config;
15596 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15597 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15598 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15599 } else if (IS_G4X(dev_priv)) {
15600 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15601 dev_priv->display.get_initial_plane_config =
15602 i9xx_get_initial_plane_config;
15603 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15604 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15605 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15606 } else if (IS_PINEVIEW(dev_priv)) {
15607 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15608 dev_priv->display.get_initial_plane_config =
15609 i9xx_get_initial_plane_config;
15610 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15611 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15612 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15613 } else if (!IS_GEN(dev_priv, 2)) {
15614 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15615 dev_priv->display.get_initial_plane_config =
15616 i9xx_get_initial_plane_config;
15617 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15618 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15619 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15620 } else {
15621 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15622 dev_priv->display.get_initial_plane_config =
15623 i9xx_get_initial_plane_config;
15624 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15625 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15626 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15627 }
15628
15629 if (IS_GEN(dev_priv, 5)) {
15630 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15631 } else if (IS_GEN(dev_priv, 6)) {
15632 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15633 } else if (IS_IVYBRIDGE(dev_priv)) {
15634 /* FIXME: detect B0+ stepping and use auto training */
15635 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15636 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15637 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15638 }
15639
15640 if (INTEL_GEN(dev_priv) >= 9)
15641 dev_priv->display.update_crtcs = skl_update_crtcs;
15642 else
15643 dev_priv->display.update_crtcs = intel_update_crtcs;
15644 }
15645
15646 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15647 {
15648 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15649 return VLV_VGACNTRL;
15650 else if (INTEL_GEN(dev_priv) >= 5)
15651 return CPU_VGACNTRL;
15652 else
15653 return VGACNTRL;
15654 }
15655
15656 /* Disable the VGA plane that we never use */
15657 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15658 {
15659 struct pci_dev *pdev = dev_priv->drm.pdev;
15660 u8 sr1;
15661 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15662
15663 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15664 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
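	/*
	 * SR01 is the VGA sequencer clocking mode register; setting
	 * bit 5 turns the screen off. The brief delay below gives the
	 * hardware time to act on it before VGACNTRL is written.
	 */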
15665 outb(SR01, VGA_SR_INDEX);
15666 sr1 = inb(VGA_SR_DATA);
15667 outb(sr1 | 1<<5, VGA_SR_DATA);
15668 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15669 udelay(300);
15670
15671 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15672 POSTING_READ(vga_reg);
15673 }
15674
15675 void intel_modeset_init_hw(struct drm_device *dev)
15676 {
15677 struct drm_i915_private *dev_priv = to_i915(dev);
15678
15679 intel_update_cdclk(dev_priv);
15680 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15681 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15682 }
15683
15684 /*
15685 * Calculate what we think the watermarks should be for the state we've read
15686 * out of the hardware and then immediately program those watermarks so that
15687 * we ensure the hardware settings match our internal state.
15688 *
15689 * We can calculate what we think WMs should be by creating a duplicate of the
15690 * current state (which was constructed during hardware readout) and running it
15691 * through the atomic check code to calculate new watermark values in the
15692 * state object.
15693 */
15694 static void sanitize_watermarks(struct drm_device *dev)
15695 {
15696 struct drm_i915_private *dev_priv = to_i915(dev);
15697 struct drm_atomic_state *state;
15698 struct intel_atomic_state *intel_state;
15699 struct drm_crtc *crtc;
15700 struct drm_crtc_state *cstate;
15701 struct drm_modeset_acquire_ctx ctx;
15702 int ret;
15703 int i;
15704
15705 /* Only supported on platforms that use atomic watermark design */
15706 if (!dev_priv->display.optimize_watermarks)
15707 return;
15708
15709 /*
15710 * We need to hold connection_mutex before calling duplicate_state so
15711 * that the connector loop is protected.
15712 */
15713 drm_modeset_acquire_init(&ctx, 0);
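	/*
	 * Standard modeset locking dance: on -EDEADLK we must back off
	 * (dropping our locks and waiting for the contending thread)
	 * and then retry the whole acquisition rather than deadlock.
	 */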
15714 retry:
15715 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15716 if (ret == -EDEADLK) {
15717 drm_modeset_backoff(&ctx);
15718 goto retry;
15719 } else if (WARN_ON(ret)) {
15720 goto fail;
15721 }
15722
15723 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15724 if (WARN_ON(IS_ERR(state)))
15725 goto fail;
15726
15727 intel_state = to_intel_atomic_state(state);
15728
15729 /*
15730 * Hardware readout is the only time we don't want to calculate
15731 * intermediate watermarks (since we don't trust the current
15732 * watermarks).
15733 */
15734 if (!HAS_GMCH(dev_priv))
15735 intel_state->skip_intermediate_wm = true;
15736
15737 ret = intel_atomic_check(dev, state);
15738 if (ret) {
15739 /*
15740 * If we fail here, it means that the hardware appears to be
15741 * programmed in a way that shouldn't be possible, given our
15742 * understanding of watermark requirements. This might mean a
15743 * mistake in the hardware readout code or a mistake in the
15744 * watermark calculations for a given platform. Raise a WARN
15745 * so that this is noticeable.
15746 *
15747 * If this actually happens, we'll have to just leave the
15748 * BIOS-programmed watermarks untouched and hope for the best.
15749 */
15750 WARN(true, "Could not determine valid watermarks for inherited state\n");
15751 goto put_state;
15752 }
15753
15754 /* Write calculated watermark values back */
15755 for_each_new_crtc_in_state(state, crtc, cstate, i) {
15756 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15757
15758 cs->wm.need_postvbl_update = true;
15759 dev_priv->display.optimize_watermarks(intel_state, cs);
15760
15761 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15762 }
15763
15764 put_state:
15765 drm_atomic_state_put(state);
15766 fail:
15767 drm_modeset_drop_locks(&ctx);
15768 drm_modeset_acquire_fini(&ctx);
15769 }
15770
15771 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15772 {
15773 if (IS_GEN(dev_priv, 5)) {
15774 u32 fdi_pll_clk =
15775 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15776
15777 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15778 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15779 dev_priv->fdi_pll_freq = 270000;
15780 } else {
15781 return;
15782 }
15783
15784 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15785 }
15786
15787 static int intel_initial_commit(struct drm_device *dev)
15788 {
15789 struct drm_atomic_state *state = NULL;
15790 struct drm_modeset_acquire_ctx ctx;
15791 struct drm_crtc *crtc;
15792 struct drm_crtc_state *crtc_state;
15793 int ret = 0;
15794
15795 state = drm_atomic_state_alloc(dev);
15796 if (!state)
15797 return -ENOMEM;
15798
15799 drm_modeset_acquire_init(&ctx, 0);
15800
15801 retry:
15802 state->acquire_ctx = &ctx;
15803
15804 drm_for_each_crtc(crtc, dev) {
15805 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15806 if (IS_ERR(crtc_state)) {
15807 ret = PTR_ERR(crtc_state);
15808 goto out;
15809 }
15810
15811 if (crtc_state->active) {
15812 ret = drm_atomic_add_affected_planes(state, crtc);
15813 if (ret)
15814 goto out;
15815
15816 /*
15817 * FIXME hack to force a LUT update to avoid the
15818 * plane update forcing the pipe gamma on without
15819 * having a proper LUT loaded. Remove once we
15820 * have readout for pipe gamma enable.
15821 */
15822 crtc_state->color_mgmt_changed = true;
15823 }
15824 }
15825
15826 ret = drm_atomic_commit(state);
15827
15828 out:
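/*
 * A commit attempt that hit -EDEADLK still holds references to the
 * crtc/plane states it collected, so drm_atomic_state_clear() must
 * reset the state object before we back off and retry; the state
 * itself stays allocated and is reused on the next pass.
 */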
15829 if (ret == -EDEADLK) {
15830 drm_atomic_state_clear(state);
15831 drm_modeset_backoff(&ctx);
15832 goto retry;
15833 }
15834
15835 drm_atomic_state_put(state);
15836
15837 drm_modeset_drop_locks(&ctx);
15838 drm_modeset_acquire_fini(&ctx);
15839
15840 return ret;
15841 }
15842
15843 int intel_modeset_init(struct drm_device *dev)
15844 {
15845 struct drm_i915_private *dev_priv = to_i915(dev);
15846 struct i915_ggtt *ggtt = &dev_priv->ggtt;
15847 enum pipe pipe;
15848 struct intel_crtc *crtc;
15849 int ret;
15850
15851 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15852
15853 drm_mode_config_init(dev);
15854
15855 ret = intel_bw_init(dev_priv);
15856 if (ret)
15857 return ret;
15858
15859 dev->mode_config.min_width = 0;
15860 dev->mode_config.min_height = 0;
15861
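/*
 * preferred_depth and prefer_shadow are hints consumed by the fbdev
 * helper when it creates the boot console framebuffer; the usual
 * motivation for the shadow buffer is to keep fbcon from drawing
 * through slow uncached mappings.
 */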
15862 dev->mode_config.preferred_depth = 24;
15863 dev->mode_config.prefer_shadow = 1;
15864
15865 dev->mode_config.allow_fb_modifiers = true;
15866
15867 dev->mode_config.funcs = &intel_mode_funcs;
15868
15869 init_llist_head(&dev_priv->atomic_helper.free_list);
15870 INIT_WORK(&dev_priv->atomic_helper.free_work,
15871 intel_atomic_helper_free_state_worker);
15872
15873 intel_init_quirks(dev_priv);
15874
15875 intel_fbc_init(dev_priv);
15876
15877 intel_init_pm(dev_priv);
15878
15879 /*
15880 * There may be no VBT; and if the BIOS enabled SSC we can
15881 * just keep using it to avoid unnecessary flicker. Whereas if the
15882 * BIOS isn't using it, don't assume it will work even if the VBT
15883 * indicates as much.
15884 */
15885 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15886 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15887 DREF_SSC1_ENABLE);
15888
15889 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15890 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15891 bios_lvds_use_ssc ? "en" : "dis",
15892 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15893 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15894 }
15895 }
15896
15897 /*
15898 * Maximum framebuffer dimensions, chosen to match
15899 * the maximum render engine surface size on gen4+.
15900 */
15901 if (INTEL_GEN(dev_priv) >= 7) {
15902 dev->mode_config.max_width = 16384;
15903 dev->mode_config.max_height = 16384;
15904 } else if (INTEL_GEN(dev_priv) >= 4) {
15905 dev->mode_config.max_width = 8192;
15906 dev->mode_config.max_height = 8192;
15907 } else if (IS_GEN(dev_priv, 3)) {
15908 dev->mode_config.max_width = 4096;
15909 dev->mode_config.max_height = 4096;
15910 } else {
15911 dev->mode_config.max_width = 2048;
15912 dev->mode_config.max_height = 2048;
15913 }
15914
15915 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15916 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15917 dev->mode_config.cursor_height = 1023;
15918 } else if (IS_GEN(dev_priv, 2)) {
15919 dev->mode_config.cursor_width = 64;
15920 dev->mode_config.cursor_height = 64;
15921 } else {
15922 dev->mode_config.cursor_width = 256;
15923 dev->mode_config.cursor_height = 256;
15924 }
15925
15926 dev->mode_config.fb_base = ggtt->gmadr.start;
15927
15928 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15929 INTEL_INFO(dev_priv)->num_pipes,
15930 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15931
15932 for_each_pipe(dev_priv, pipe) {
15933 ret = intel_crtc_init(dev_priv, pipe);
15934 if (ret) {
15935 drm_mode_config_cleanup(dev);
15936 return ret;
15937 }
15938 }
15939
15940 intel_shared_dpll_init(dev);
15941 intel_update_fdi_pll_freq(dev_priv);
15942
15943 intel_update_czclk(dev_priv);
15944 intel_modeset_init_hw(dev);
15945
15946 intel_hdcp_component_init(dev_priv);
15947
15948 if (dev_priv->max_cdclk_freq == 0)
15949 intel_update_max_cdclk(dev_priv);
15950
15951 /* Just disable it once at startup */
15952 i915_disable_vga(dev_priv);
15953 intel_setup_outputs(dev_priv);
15954
15955 drm_modeset_lock_all(dev);
15956 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15957 drm_modeset_unlock_all(dev);
15958
15959 for_each_intel_crtc(dev, crtc) {
15960 struct intel_initial_plane_config plane_config = {};
15961
15962 if (!crtc->active)
15963 continue;
15964
15965 /*
15966 * Note that reserving the BIOS fb up front prevents us
15967 * from stuffing other stolen allocations like the ring
15968 * on top. This prevents some ugliness at boot time, and
15969 * can even allow for smooth boot transitions if the BIOS
15970 * fb is large enough for the active pipe configuration.
15971 */
15972 dev_priv->display.get_initial_plane_config(crtc,
15973 &plane_config);
15974
15975 /*
15976 * If the fb is shared between multiple heads, we'll
15977 * just get the first one.
15978 */
15979 intel_find_initial_plane_obj(crtc, &plane_config);
15980 }
15981
15982 /*
15983 * Make sure hardware watermarks really match the state we read out.
15984 * Note that we need to do this after reconstructing the BIOS fbs
15985 * since the watermark calculation done here will use pstate->fb.
15986 */
15987 if (!HAS_GMCH(dev_priv))
15988 sanitize_watermarks(dev);
15989
15990 /*
15991 * Force all active planes to recompute their states, so that on
15992 * mode_setcrtc after probe all the intel_plane_state variables
15993 * are already calculated and there are no assert_plane warnings
15994 * during bootup.
15995 */
15996 ret = intel_initial_commit(dev);
15997 if (ret)
15998 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15999
16000 return 0;
16001 }
16002
16003 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16004 {
16005 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16006 /* 640x480@60Hz, ~25175 kHz */
16007 struct dpll clock = {
16008 .m1 = 18,
16009 .m2 = 7,
16010 .p1 = 13,
16011 .p2 = 4,
16012 .n = 2,
16013 };
16014 u32 dpll, fp;
16015 int i;
16016
16017 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
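/*
 * Worked example for the WARN_ON above, assuming the i9xx PLL
 * formula implemented by i9xx_calc_dpll_params():
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9 = 109
 *   vco = refclk * m / (n + 2)    = 48000 * 109 / 4 = 1308000 kHz
 *   dot = vco / (p1 * p2)         = 1308000 / 52
 *                                 = 25154 kHz (rounded; the nominal
 *                                   dotclock is ~25175 kHz)
 */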
16018
16019 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
16020 pipe_name(pipe), clock.vco, clock.dot);
16021
16022 fp = i9xx_dpll_compute_fp(&clock);
16023 dpll = DPLL_DVO_2X_MODE |
16024 DPLL_VGA_MODE_DIS |
16025 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16026 PLL_P2_DIVIDE_BY_4 |
16027 PLL_REF_INPUT_DREFCLK |
16028 DPLL_VCO_ENABLE;
16029
16030 I915_WRITE(FP0(pipe), fp);
16031 I915_WRITE(FP1(pipe), fp);
16032
16033 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16034 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16035 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16036 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16037 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16038 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16039 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
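/*
 * The timings written above are the standard 640x480@60 VGA mode
 * (htotal 800, vtotal 525). Each timing register packs (start - 1)
 * in the low 16 bits and (end - 1) in the high 16 bits, hence all
 * the "- 1"s; PIPESRC is the odd one out, carrying the horizontal
 * size in the high half.
 */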
16040
16041 /*
16042 * Apparently we need to have VGA mode enabled prior to changing
16043 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16044 * dividers, even though the register value does change.
16045 */
16046 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16047 I915_WRITE(DPLL(pipe), dpll);
16048
16049 /* Wait for the clocks to stabilize. */
16050 POSTING_READ(DPLL(pipe));
16051 udelay(150);
16052
16053 /* The pixel multiplier can only be updated once the
16054 * DPLL is enabled and the clocks are stable.
16055 *
16056 * So write it again.
16057 */
16058 I915_WRITE(DPLL(pipe), dpll);
16059
16060 /* We do this three times for luck */
16061 for (i = 0; i < 3; i++) {
16062 I915_WRITE(DPLL(pipe), dpll);
16063 POSTING_READ(DPLL(pipe));
16064 udelay(150); /* wait for warmup */
16065 }
16066
16067 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16068 POSTING_READ(PIPECONF(pipe));
16069
16070 intel_wait_for_pipe_scanline_moving(crtc);
16071 }
16072
16073 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16074 {
16075 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16076
16077 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16078 pipe_name(pipe));
16079
16080 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
16081 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
16082 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
16083 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
16084 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
16085
16086 I915_WRITE(PIPECONF(pipe), 0);
16087 POSTING_READ(PIPECONF(pipe));
16088
16089 intel_wait_for_pipe_scanline_stopped(crtc);
16090
16091 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
16092 POSTING_READ(DPLL(pipe));
16093 }
16094
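/*
 * On gen2/3 the primary planes carry a pipe select, so the BIOS can
 * leave a plane attached to the "wrong" pipe; detect that and turn
 * the plane off. Gen4+ is assumed sane here, hence the early return.
 */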
16095 static void
16096 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16097 {
16098 struct intel_crtc *crtc;
16099
16100 if (INTEL_GEN(dev_priv) >= 4)
16101 return;
16102
16103 for_each_intel_crtc(&dev_priv->drm, crtc) {
16104 struct intel_plane *plane =
16105 to_intel_plane(crtc->base.primary);
16106 struct intel_crtc *plane_crtc;
16107 enum pipe pipe;
16108
16109 if (!plane->get_hw_state(plane, &pipe))
16110 continue;
16111
16112 if (pipe == crtc->pipe)
16113 continue;
16114
16115 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16116 plane->base.base.id, plane->base.name);
16117
16118 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16119 intel_plane_disable_noatomic(plane_crtc, plane);
16120 }
16121 }
16122
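/*
 * The two helpers below lean on the for_each_* iterators to answer
 * "is anything attached?": the loop body runs at most once and
 * returns on the first encoder/connector found.
 */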
16123 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16124 {
16125 struct drm_device *dev = crtc->base.dev;
16126 struct intel_encoder *encoder;
16127
16128 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16129 return true;
16130
16131 return false;
16132 }
16133
16134 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16135 {
16136 struct drm_device *dev = encoder->base.dev;
16137 struct intel_connector *connector;
16138
16139 for_each_connector_on_encoder(dev, &encoder->base, connector)
16140 return connector;
16141
16142 return NULL;
16143 }
16144
16145 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
16146 enum pipe pch_transcoder)
16147 {
16148 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16149 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16150 }
16151
16152 static void intel_sanitize_crtc(struct intel_crtc *crtc,
16153 struct drm_modeset_acquire_ctx *ctx)
16154 {
16155 struct drm_device *dev = crtc->base.dev;
16156 struct drm_i915_private *dev_priv = to_i915(dev);
16157 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
16158 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
16159
16160 /* Clear any frame start delays used for debugging left by the BIOS */
16161 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
16162 i915_reg_t reg = PIPECONF(cpu_transcoder);
16163
16164 I915_WRITE(reg,
16165 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16166 }
16167
16168 if (crtc_state->base.active) {
16169 struct intel_plane *plane;
16170
16171 /* Disable everything but the primary plane */
16172 for_each_intel_plane_on_crtc(dev, crtc, plane) {
16173 const struct intel_plane_state *plane_state =
16174 to_intel_plane_state(plane->base.state);
16175
16176 if (plane_state->base.visible &&
16177 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16178 intel_plane_disable_noatomic(crtc, plane);
16179 }
16180
16181 /*
16182 * Disable any background color set by the BIOS, but enable the
16183 * gamma and CSC to match how we program our planes.
16184 */
16185 if (INTEL_GEN(dev_priv) >= 9)
16186 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16187 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16188 SKL_BOTTOM_COLOR_CSC_ENABLE);
16189 }
16190
16191 /* Adjust the state of the output pipe according to whether we
16192 * have active connectors/encoders. */
16193 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
16194 intel_crtc_disable_noatomic(&crtc->base, ctx);
16195
16196 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
16197 /*
16198 * We start out with underrun reporting disabled to avoid races.
16199 * For correct bookkeeping mark this on active crtcs.
16200 *
16201 * Also on gmch platforms we don't have any hardware bits to
16202 * disable the underrun reporting. Which means we need to start
16203 * out with underrun reporting disabled also on inactive pipes,
16204 * since otherwise we'll complain about the garbage we read when
16205 * e.g. coming up after runtime pm.
16206 *
16207 * No protection against concurrent access is required - at
16208 * worst a fifo underrun happens which also sets this to false.
16209 */
16210 crtc->cpu_fifo_underrun_disabled = true;
16211 /*
16212 * We track the PCH transcoder underrun reporting state
16213 * within the crtc: the crtc for pipe A houses the underrun
16214 * reporting state for PCH transcoder A, the crtc for pipe B
16215 * houses it for PCH transcoder B, etc. LPT-H has only PCH
16216 * transcoder A, and marking underrun reporting as disabled for
16217 * the non-existing PCH transcoders B and C would prevent enabling
16218 * the south error interrupt (see cpt_can_enable_serr_int()).
16219 */
16220 if (has_pch_transcoder(dev_priv, crtc->pipe))
16221 crtc->pch_fifo_underrun_disabled = true;
16222 }
16223 }
16224
16225 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16226 {
16227 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16228
16229 /*
16230 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
16231 * the hardware when a high res display is plugged in. The DPLL P
16232 * divider is zero, and the pipe timings are bonkers. We'll
16233 * try to disable everything in that case.
16234 *
16235 * FIXME would be nice to be able to sanitize this state
16236 * without several WARNs, but for now let's take the easy
16237 * road.
16238 */
16239 return IS_GEN(dev_priv, 6) &&
16240 crtc_state->base.active &&
16241 crtc_state->shared_dpll &&
16242 crtc_state->port_clock == 0;
16243 }
16244
16245 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16246 {
16247 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
16248 struct intel_connector *connector;
16249 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16250 struct intel_crtc_state *crtc_state = crtc ?
16251 to_intel_crtc_state(crtc->base.state) : NULL;
16252
16253 /* We need to check both for a crtc link (meaning that the
16254 * encoder is active and trying to read from a pipe) and the
16255 * pipe itself being active. */
16256 bool has_active_crtc = crtc_state &&
16257 crtc_state->base.active;
16258
16259 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16260 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16261 pipe_name(crtc->pipe));
16262 has_active_crtc = false;
16263 }
16264
16265 connector = intel_encoder_find_connector(encoder);
16266 if (connector && !has_active_crtc) {
16267 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16268 encoder->base.base.id,
16269 encoder->base.name);
16270
16271 /* Connector is active, but has no active pipe. This is
16272 * fallout from our resume register restoring. Disable
16273 * the encoder manually again. */
16274 if (crtc_state) {
16275 struct drm_encoder *best_encoder;
16276
16277 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16278 encoder->base.base.id,
16279 encoder->base.name);
16280
16281 /* avoid oopsing in case the hooks consult best_encoder */
16282 best_encoder = connector->base.state->best_encoder;
16283 connector->base.state->best_encoder = &encoder->base;
16284
16285 if (encoder->disable)
16286 encoder->disable(encoder, crtc_state,
16287 connector->base.state);
16288 if (encoder->post_disable)
16289 encoder->post_disable(encoder, crtc_state,
16290 connector->base.state);
16291
16292 connector->base.state->best_encoder = best_encoder;
16293 }
16294 encoder->base.crtc = NULL;
16295
16296 /* Inconsistent output/port/pipe state happens presumably due to
16297 * a bug in one of the get_hw_state functions. Or someplace else
16298 * in our code, like the register restore mess on resume. Clamp
16299 * things to off as a safer default. */
16300
16301 connector->base.dpms = DRM_MODE_DPMS_OFF;
16302 connector->base.encoder = NULL;
16303 }
16304
16305 /* notify opregion of the sanitized encoder state */
16306 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
16307
16308 if (INTEL_GEN(dev_priv) >= 11)
16309 icl_sanitize_encoder_pll_mapping(encoder);
16310 }
16311
16312 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16313 {
16314 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16315
16316 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16317 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16318 i915_disable_vga(dev_priv);
16319 }
16320 }
16321
16322 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16323 {
16324 intel_wakeref_t wakeref;
16325
16326 /*
16327 * This function can be called either from intel_modeset_setup_hw_state or
16328 * at a very early point in our resume sequence, where the power well
16329 * structures are not yet restored. Since this function is at a very
16330 * paranoid "someone might have enabled VGA while we were not looking"
16331 * level, just check if the power well is enabled instead of trying to
16332 * follow the "don't touch the power well if we don't need it" policy
16333 * the rest of the driver uses.
16334 */
16335 wakeref = intel_display_power_get_if_enabled(dev_priv,
16336 POWER_DOMAIN_VGA);
16337 if (!wakeref)
16338 return;
16339
16340 i915_redisable_vga_power_on(dev_priv);
16341
16342 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16343 }
16344
16345 /* FIXME read out full plane state for all planes */
16346 static void readout_plane_state(struct drm_i915_private *dev_priv)
16347 {
16348 struct intel_plane *plane;
16349 struct intel_crtc *crtc;
16350
16351 for_each_intel_plane(&dev_priv->drm, plane) {
16352 struct intel_plane_state *plane_state =
16353 to_intel_plane_state(plane->base.state);
16354 struct intel_crtc_state *crtc_state;
16355 enum pipe pipe = PIPE_A;
16356 bool visible;
16357
16358 visible = plane->get_hw_state(plane, &pipe);
16359
16360 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16361 crtc_state = to_intel_crtc_state(crtc->base.state);
16362
16363 intel_set_plane_visible(crtc_state, plane_state, visible);
16364
16365 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16366 plane->base.base.id, plane->base.name,
16367 enableddisabled(visible), pipe_name(pipe));
16368 }
16369
16370 for_each_intel_crtc(&dev_priv->drm, crtc) {
16371 struct intel_crtc_state *crtc_state =
16372 to_intel_crtc_state(crtc->base.state);
16373
16374 fixup_active_planes(crtc_state);
16375 }
16376 }
16377
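/*
 * Readout proceeds in dependency order: pipe config first, then
 * plane state (which needs the crtc state), shared DPLLs (which need
 * to know which active crtcs reference them), encoders (which link
 * themselves to a crtc), and finally connectors (which link to an
 * encoder).
 */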
16378 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16379 {
16380 struct drm_i915_private *dev_priv = to_i915(dev);
16381 enum pipe pipe;
16382 struct intel_crtc *crtc;
16383 struct intel_encoder *encoder;
16384 struct intel_connector *connector;
16385 struct drm_connector_list_iter conn_iter;
16386 int i;
16387
16388 dev_priv->active_crtcs = 0;
16389
16390 for_each_intel_crtc(dev, crtc) {
16391 struct intel_crtc_state *crtc_state =
16392 to_intel_crtc_state(crtc->base.state);
16393
16394 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16395 memset(crtc_state, 0, sizeof(*crtc_state));
16396 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
16397
16398 crtc_state->base.active = crtc_state->base.enable =
16399 dev_priv->display.get_pipe_config(crtc, crtc_state);
16400
16401 crtc->base.enabled = crtc_state->base.enable;
16402 crtc->active = crtc_state->base.active;
16403
16404 if (crtc_state->base.active)
16405 dev_priv->active_crtcs |= 1 << crtc->pipe;
16406
16407 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16408 crtc->base.base.id, crtc->base.name,
16409 enableddisabled(crtc_state->base.active));
16410 }
16411
16412 readout_plane_state(dev_priv);
16413
16414 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16415 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16416
16417 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16418 &pll->state.hw_state);
16419 pll->state.crtc_mask = 0;
16420 for_each_intel_crtc(dev, crtc) {
16421 struct intel_crtc_state *crtc_state =
16422 to_intel_crtc_state(crtc->base.state);
16423
16424 if (crtc_state->base.active &&
16425 crtc_state->shared_dpll == pll)
16426 pll->state.crtc_mask |= 1 << crtc->pipe;
16427 }
16428 pll->active_mask = pll->state.crtc_mask;
16429
16430 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16431 pll->info->name, pll->state.crtc_mask, pll->on);
16432 }
16433
16434 for_each_intel_encoder(dev, encoder) {
16435 pipe = PIPE_A;
16436
16437 if (encoder->get_hw_state(encoder, &pipe)) {
16438 struct intel_crtc_state *crtc_state;
16439
16440 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16441 crtc_state = to_intel_crtc_state(crtc->base.state);
16442
16443 encoder->base.crtc = &crtc->base;
16444 encoder->get_config(encoder, crtc_state);
16445 } else {
16446 encoder->base.crtc = NULL;
16447 }
16448
16449 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16450 encoder->base.base.id, encoder->base.name,
16451 enableddisabled(encoder->base.crtc),
16452 pipe_name(pipe));
16453 }
16454
16455 drm_connector_list_iter_begin(dev, &conn_iter);
16456 for_each_intel_connector_iter(connector, &conn_iter) {
16457 if (connector->get_hw_state(connector)) {
16458 connector->base.dpms = DRM_MODE_DPMS_ON;
16459
16460 encoder = connector->encoder;
16461 connector->base.encoder = &encoder->base;
16462
16463 if (encoder->base.crtc &&
16464 encoder->base.crtc->state->active) {
16465 /*
16466 * This has to be done during hardware readout
16467 * because anything calling .crtc_disable may
16468 * rely on the connector_mask being accurate.
16469 */
16470 encoder->base.crtc->state->connector_mask |=
16471 drm_connector_mask(&connector->base);
16472 encoder->base.crtc->state->encoder_mask |=
16473 drm_encoder_mask(&encoder->base);
16474 }
16475
16476 } else {
16477 connector->base.dpms = DRM_MODE_DPMS_OFF;
16478 connector->base.encoder = NULL;
16479 }
16480 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16481 connector->base.base.id, connector->base.name,
16482 enableddisabled(connector->base.encoder));
16483 }
16484 drm_connector_list_iter_end(&conn_iter);
16485
16486 for_each_intel_crtc(dev, crtc) {
16487 struct intel_bw_state *bw_state =
16488 to_intel_bw_state(dev_priv->bw_obj.state);
16489 struct intel_crtc_state *crtc_state =
16490 to_intel_crtc_state(crtc->base.state);
16491 struct intel_plane *plane;
16492 int min_cdclk = 0;
16493
16494 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16495 if (crtc_state->base.active) {
16496 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16497 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16498 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16499 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16500 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16501
16502 /*
16503 * The initial mode needs to be set in order to keep
16504 * the atomic core happy. It wants a valid mode if the
16505 * crtc's enabled, so we do the above call.
16506 *
16507 * But we don't set all the derived state fully, hence
16508 * set a flag to indicate that a full recalculation is
16509 * needed on the next commit.
16510 */
16511 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16512
16513 intel_crtc_compute_pixel_rate(crtc_state);
16514
16515 if (dev_priv->display.modeset_calc_cdclk) {
16516 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16517 if (WARN_ON(min_cdclk < 0))
16518 min_cdclk = 0;
16519 }
16520
16521 drm_calc_timestamping_constants(&crtc->base,
16522 &crtc_state->base.adjusted_mode);
16523 update_scanline_offset(crtc_state);
16524 }
16525
16526 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16527 dev_priv->min_voltage_level[crtc->pipe] =
16528 crtc_state->min_voltage_level;
16529
16530 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
16531 const struct intel_plane_state *plane_state =
16532 to_intel_plane_state(plane->base.state);
16533
16534 /*
16535 * FIXME don't have the fb yet, so can't
16536 * use intel_plane_data_rate() :(
16537 */
16538 if (plane_state->base.visible)
16539 crtc_state->data_rate[plane->id] =
16540 4 * crtc_state->pixel_rate;
16541 }
16542
16543 intel_bw_crtc_update(bw_state, crtc_state);
16544
16545 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16546 }
16547 }
16548
16549 static void
16550 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16551 {
16552 struct intel_encoder *encoder;
16553
16554 for_each_intel_encoder(&dev_priv->drm, encoder) {
16555 struct intel_crtc_state *crtc_state;
16556
16557 if (!encoder->get_power_domains)
16558 continue;
16559
16560 /*
16561 * MST-primary and inactive encoders don't have a crtc state
16562 * and neither of these requires any power domain references.
16563 */
16564 if (!encoder->base.crtc)
16565 continue;
16566
16567 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16568 encoder->get_power_domains(encoder, crtc_state);
16569 }
16570 }
16571
16572 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16573 {
16574 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16575 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16576 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16577 DARBF_GATING_DIS);
16578
16579 if (IS_HASWELL(dev_priv)) {
16580 /*
16581 * WaRsPkgCStateDisplayPMReq:hsw
16582 * System hang if this isn't done before disabling all planes!
16583 */
16584 I915_WRITE(CHICKEN_PAR1_1,
16585 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16586 }
16587 }
16588
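/*
 * The two port sanitizers below bail out if the port is enabled (a
 * live port must not be touched here) or already selects pipe A
 * (nothing to fix); otherwise only the pipe select field is
 * rewritten. See ibx_sanitize_pch_ports() below for the rationale.
 */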
16589 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16590 enum port port, i915_reg_t hdmi_reg)
16591 {
16592 u32 val = I915_READ(hdmi_reg);
16593
16594 if (val & SDVO_ENABLE ||
16595 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16596 return;
16597
16598 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16599 port_name(port));
16600
16601 val &= ~SDVO_PIPE_SEL_MASK;
16602 val |= SDVO_PIPE_SEL(PIPE_A);
16603
16604 I915_WRITE(hdmi_reg, val);
16605 }
16606
16607 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16608 enum port port, i915_reg_t dp_reg)
16609 {
16610 u32 val = I915_READ(dp_reg);
16611
16612 if (val & DP_PORT_EN ||
16613 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16614 return;
16615
16616 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16617 port_name(port));
16618
16619 val &= ~DP_PIPE_SEL_MASK;
16620 val |= DP_PIPE_SEL(PIPE_A);
16621
16622 I915_WRITE(dp_reg, val);
16623 }
16624
16625 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16626 {
16627 /*
16628 * The BIOS may select transcoder B on some of the PCH
16629 * ports even if it doesn't enable the port. This would trip
16630 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16631 * Sanitize the transcoder select bits to prevent that. We
16632 * assume that the BIOS never actually enabled the port,
16633 * because if it did we'd actually have to toggle the port
16634 * on and back off to make the transcoder A select stick
16635 * (see intel_dp_link_down(), intel_disable_hdmi(),
16636 * intel_disable_sdvo()).
16637 */
16638 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16639 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16640 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16641
16642 /* PCH SDVOB multiplex with HDMIB */
16643 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16644 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16645 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16646 }
16647
16648 /* Scan out the current hw modeset state,
16649 * and sanitize it to a consistent state
16650 */
16651 static void
16652 intel_modeset_setup_hw_state(struct drm_device *dev,
16653 struct drm_modeset_acquire_ctx *ctx)
16654 {
16655 struct drm_i915_private *dev_priv = to_i915(dev);
16656 struct intel_crtc_state *crtc_state;
16657 struct intel_encoder *encoder;
16658 struct intel_crtc *crtc;
16659 intel_wakeref_t wakeref;
16660 int i;
16661
16662 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16663
16664 intel_early_display_was(dev_priv);
16665 intel_modeset_readout_hw_state(dev);
16666
16667 /* HW state is read out, now we need to sanitize this mess. */
16668 get_encoder_power_domains(dev_priv);
16669
16670 if (HAS_PCH_IBX(dev_priv))
16671 ibx_sanitize_pch_ports(dev_priv);
16672
16673 /*
16674 * intel_sanitize_plane_mapping() may need to do vblank
16675 * waits, so we need vblank interrupts restored beforehand.
16676 */
16677 for_each_intel_crtc(&dev_priv->drm, crtc) {
16678 crtc_state = to_intel_crtc_state(crtc->base.state);
16679
16680 drm_crtc_vblank_reset(&crtc->base);
16681
16682 if (crtc_state->base.active)
16683 intel_crtc_vblank_on(crtc_state);
16684 }
16685
16686 intel_sanitize_plane_mapping(dev_priv);
16687
16688 for_each_intel_encoder(dev, encoder)
16689 intel_sanitize_encoder(encoder);
16690
16691 for_each_intel_crtc(&dev_priv->drm, crtc) {
16692 crtc_state = to_intel_crtc_state(crtc->base.state);
16693 intel_sanitize_crtc(crtc, ctx);
16694 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
16695 }
16696
16697 intel_modeset_update_connector_atomic_state(dev);
16698
16699 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16700 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16701
16702 if (!pll->on || pll->active_mask)
16703 continue;
16704
16705 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16706 pll->info->name);
16707
16708 pll->info->funcs->disable(dev_priv, pll);
16709 pll->on = false;
16710 }
16711
16712 if (IS_G4X(dev_priv)) {
16713 g4x_wm_get_hw_state(dev_priv);
16714 g4x_wm_sanitize(dev_priv);
16715 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16716 vlv_wm_get_hw_state(dev_priv);
16717 vlv_wm_sanitize(dev_priv);
16718 } else if (INTEL_GEN(dev_priv) >= 9) {
16719 skl_wm_get_hw_state(dev_priv);
16720 } else if (HAS_PCH_SPLIT(dev_priv)) {
16721 ilk_wm_get_hw_state(dev_priv);
16722 }
16723
16724 for_each_intel_crtc(dev, crtc) {
16725 u64 put_domains;
16726
16727 crtc_state = to_intel_crtc_state(crtc->base.state);
16728 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16729 if (WARN_ON(put_domains))
16730 modeset_put_power_domains(dev_priv, put_domains);
16731 }
16732
16733 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
16734
16735 intel_fbc_init_pipe_state(dev_priv);
16736 }
16737
16738 void intel_display_resume(struct drm_device *dev)
16739 {
16740 struct drm_i915_private *dev_priv = to_i915(dev);
16741 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16742 struct drm_modeset_acquire_ctx ctx;
16743 int ret;
16744
16745 dev_priv->modeset_restore_state = NULL;
16746 if (state)
16747 state->acquire_ctx = &ctx;
16748
16749 drm_modeset_acquire_init(&ctx, 0);
16750
16751 while (1) {
16752 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16753 if (ret != -EDEADLK)
16754 break;
16755
16756 drm_modeset_backoff(&ctx);
16757 }
16758
16759 if (!ret)
16760 ret = __intel_display_resume(dev, state, &ctx);
16761
16762 intel_enable_ipc(dev_priv);
16763 drm_modeset_drop_locks(&ctx);
16764 drm_modeset_acquire_fini(&ctx);
16765
16766 if (ret)
16767 DRM_ERROR("Restoring old state failed with %i\n", ret);
16768 if (state)
16769 drm_atomic_state_put(state);
16770 }
16771
16772 static void intel_hpd_poll_fini(struct drm_device *dev)
16773 {
16774 struct intel_connector *connector;
16775 struct drm_connector_list_iter conn_iter;
16776
16777 /* Kill all the work that may have been queued by hpd. */
16778 drm_connector_list_iter_begin(dev, &conn_iter);
16779 for_each_intel_connector_iter(connector, &conn_iter) {
16780 if (connector->modeset_retry_work.func)
16781 cancel_work_sync(&connector->modeset_retry_work);
16782 if (connector->hdcp.shim) {
16783 cancel_delayed_work_sync(&connector->hdcp.check_work);
16784 cancel_work_sync(&connector->hdcp.prop_work);
16785 }
16786 }
16787 drm_connector_list_iter_end(&conn_iter);
16788 }
16789
16790 void intel_modeset_cleanup(struct drm_device *dev)
16791 {
16792 struct drm_i915_private *dev_priv = to_i915(dev);
16793
16794 flush_workqueue(dev_priv->modeset_wq);
16795
16796 flush_work(&dev_priv->atomic_helper.free_work);
16797 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16798
16799 /*
16800 * Shut down interrupts and polling first to avoid creating havoc.
16801 * Too much of the stuff here (turning off connectors, ...) would
16802 * otherwise run into fancy races.
16803 */
16804 intel_irq_uninstall(dev_priv);
16805
16806 /*
16807 * Due to the hpd irq storm handling, the hotplug work can re-arm the
16808 * poll handlers. Hence disable polling after hpd handling is shut down.
16809 */
16810 intel_hpd_poll_fini(dev);
16811
16812 /* poll work can call into fbdev, hence clean that up afterwards */
16813 intel_fbdev_fini(dev_priv);
16814
16815 intel_unregister_dsm_handler();
16816
16817 intel_fbc_global_disable(dev_priv);
16818
16819 /* flush any delayed tasks or pending work */
16820 flush_scheduled_work();
16821
16822 intel_hdcp_component_fini(dev_priv);
16823
16824 drm_mode_config_cleanup(dev);
16825
16826 intel_overlay_cleanup(dev_priv);
16827
16828 intel_gmbus_teardown(dev_priv);
16829
16830 destroy_workqueue(dev_priv->modeset_wq);
16831
16832 intel_fbc_cleanup_cfb(dev_priv);
16833 }
16834
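/*
 * Legacy VGA decode by the IGD is gated by a bit in the GMCH control
 * word in the bridge device's PCI config space; this is the knob the
 * VGA arbiter flips, via the driver's set_decode callback, when
 * legacy VGA resources need to be routed to another GPU.
 */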
16835 /*
16836 * set vga decode state - true == enable VGA decode
16837 */
16838 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16839 {
16840 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16841 u16 gmch_ctrl;
16842
16843 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16844 DRM_ERROR("failed to read control word\n");
16845 return -EIO;
16846 }
16847
16848 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16849 return 0;
16850
16851 if (state)
16852 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16853 else
16854 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16855
16856 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16857 DRM_ERROR("failed to write control word\n");
16858 return -EIO;
16859 }
16860
16861 return 0;
16862 }
16863
16864 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16865
16866 struct intel_display_error_state {
16867
16868 u32 power_well_driver;
16869
16870 struct intel_cursor_error_state {
16871 u32 control;
16872 u32 position;
16873 u32 base;
16874 u32 size;
16875 } cursor[I915_MAX_PIPES];
16876
16877 struct intel_pipe_error_state {
16878 bool power_domain_on;
16879 u32 source;
16880 u32 stat;
16881 } pipe[I915_MAX_PIPES];
16882
16883 struct intel_plane_error_state {
16884 u32 control;
16885 u32 stride;
16886 u32 size;
16887 u32 pos;
16888 u32 addr;
16889 u32 surface;
16890 u32 tile_offset;
16891 } plane[I915_MAX_PIPES];
16892
16893 struct intel_transcoder_error_state {
16894 bool available;
16895 bool power_domain_on;
16896 enum transcoder cpu_transcoder;
16897
16898 u32 conf;
16899
16900 u32 htotal;
16901 u32 hblank;
16902 u32 hsync;
16903 u32 vtotal;
16904 u32 vblank;
16905 u32 vsync;
16906 } transcoder[4];
16907 };
16908
16909 struct intel_display_error_state *
16910 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16911 {
16912 struct intel_display_error_state *error;
16913 static const int transcoders[] = {
16914 TRANSCODER_A,
16915 TRANSCODER_B,
16916 TRANSCODER_C,
16917 TRANSCODER_EDP,
16918 };
16919 int i;
16920
16921 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16922
16923 if (!HAS_DISPLAY(dev_priv))
16924 return NULL;
16925
16926 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16927 if (error == NULL)
16928 return NULL;
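/*
 * GFP_ATOMIC because error capture may run from contexts that cannot
 * sleep; the power_domain_on checks below keep us from reading
 * registers in power-gated wells, which would not read back sanely.
 */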
16929
16930 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16931 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16932
16933 for_each_pipe(dev_priv, i) {
16934 error->pipe[i].power_domain_on =
16935 __intel_display_power_is_enabled(dev_priv,
16936 POWER_DOMAIN_PIPE(i));
16937 if (!error->pipe[i].power_domain_on)
16938 continue;
16939
16940 error->cursor[i].control = I915_READ(CURCNTR(i));
16941 error->cursor[i].position = I915_READ(CURPOS(i));
16942 error->cursor[i].base = I915_READ(CURBASE(i));
16943
16944 error->plane[i].control = I915_READ(DSPCNTR(i));
16945 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16946 if (INTEL_GEN(dev_priv) <= 3) {
16947 error->plane[i].size = I915_READ(DSPSIZE(i));
16948 error->plane[i].pos = I915_READ(DSPPOS(i));
16949 }
16950 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16951 error->plane[i].addr = I915_READ(DSPADDR(i));
16952 if (INTEL_GEN(dev_priv) >= 4) {
16953 error->plane[i].surface = I915_READ(DSPSURF(i));
16954 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16955 }
16956
16957 error->pipe[i].source = I915_READ(PIPESRC(i));
16958
16959 if (HAS_GMCH(dev_priv))
16960 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16961 }
16962
16963 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16964 enum transcoder cpu_transcoder = transcoders[i];
16965
16966 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16967 continue;
16968
16969 error->transcoder[i].available = true;
16970 error->transcoder[i].power_domain_on =
16971 __intel_display_power_is_enabled(dev_priv,
16972 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16973 if (!error->transcoder[i].power_domain_on)
16974 continue;
16975
16976 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16977
16978 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16979 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16980 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16981 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16982 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16983 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16984 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16985 }
16986
16987 return error;
16988 }
16989
16990 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16991
16992 void
16993 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16994 struct intel_display_error_state *error)
16995 {
16996 struct drm_i915_private *dev_priv = m->i915;
16997 int i;
16998
16999 if (!error)
17000 return;
17001
17002 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
17003 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17004 err_printf(m, "PWR_WELL_CTL2: %08x\n",
17005 error->power_well_driver);
17006 for_each_pipe(dev_priv, i) {
17007 err_printf(m, "Pipe [%d]:\n", i);
17008 err_printf(m, " Power: %s\n",
17009 onoff(error->pipe[i].power_domain_on));
17010 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
17011 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
17012
17013 err_printf(m, "Plane [%d]:\n", i);
17014 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
17015 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
17016 if (INTEL_GEN(dev_priv) <= 3) {
17017 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
17018 err_printf(m, " POS: %08x\n", error->plane[i].pos);
17019 }
17020 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17021 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
17022 if (INTEL_GEN(dev_priv) >= 4) {
17023 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
17024 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
17025 }
17026
17027 err_printf(m, "Cursor [%d]:\n", i);
17028 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
17029 err_printf(m, " POS: %08x\n", error->cursor[i].position);
17030 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
17031 }
17032
17033 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17034 if (!error->transcoder[i].available)
17035 continue;
17036
17037 err_printf(m, "CPU transcoder: %s\n",
17038 transcoder_name(error->transcoder[i].cpu_transcoder));
17039 err_printf(m, " Power: %s\n",
17040 onoff(error->transcoder[i].power_domain_on));
17041 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
17042 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
17043 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
17044 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
17045 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
17046 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
17047 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
17048 }
17049 }
17050
17051 #endif