/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lvds.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tv.h"
#include "intel_vdsc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		    CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
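
/*
 * Illustrative example (numbers chosen for the sketch, not from bspec):
 * the CCK divider field encodes half-steps, i.e. the effective divide
 * ratio is (divider + 1) / 2, which is why the computation above doubles
 * ref_freq before dividing. With a 1.6 GHz HPLL VCO (ref_freq = 1600000
 * kHz) and divider = 7 this yields (1600000 * 2) / (7 + 1) = 400000 kHz.
 */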

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

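/*
 * Illustrative example (divisor values chosen for the sketch, not from
 * bspec): on Pineview with refclk = 96000 kHz, m2 = 112, n = 4, p1 = 2
 * and p2 = 5, the helper above computes m = m2 + 2 = 114, p = p1 * p2 = 10,
 * vco = 96000 * 114 / 4 = 2736000 kHz and dot = 2736000 / 10 = 273600 kHz,
 * all of which fall inside intel_limits_pineview_sdvo.
 */
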
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

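/*
 * Illustrative example (values chosen for the sketch): CHV stores m2 in
 * 22-bit fractional fixed point, hence the "n << 22" divisor above. With
 * refclk = 19200 kHz, n = 1, m1 = 2 and m2 = 140 << 22, the VCO is
 * 19200 * 2 * 140 = 5376000 kHz; with p1 = 2 and p2 = 14 the fast clock
 * is 5376000 / 28 = 192000 kHz, giving a 192000 / 5 = 38400 kHz pixel
 * clock.
 */
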
#define INTELPllInvalid(s)	do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. The calculated error is
 * returned in *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

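					/*
					 * Solve dot = refclk * m1 * m2 / (n * p)
					 * for m2 directly instead of iterating
					 * over it; target here is the fast
					 * (5x) clock.
					 */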
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 to 2.
	 * If we ever need to support a 200 MHz refclk, this needs to be
	 * revisited because n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}
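
/*
 * Note: BXT reuses the CHV solver above since its limits table has the
 * same shape (n fixed to 1, m1 fixed to 2, fractional 22-bit m2), just
 * with a fixed 100 MHz reference clock and BXT-specific ranges.
 */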

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

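/*
 * Sample the pipe's current scanline (PIPEDSL) twice, 5 ms apart: if the
 * value changed, the pipe is actively scanning out. The helpers below use
 * this to wait for a pipe to start or stop.
 */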
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

1685 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1686 enum transcoder cpu_transcoder)
1687 {
1688 u32 val, pipeconf_val;
1689
1690 /* FDI must be feeding us bits for PCH ports */
1691 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1692 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1693
1694 /* Workaround: set timing override bit. */
1695 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1696 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1697 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1698
1699 val = TRANS_ENABLE;
1700 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1701
1702 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1703 PIPECONF_INTERLACED_ILK)
1704 val |= TRANS_INTERLACED;
1705 else
1706 val |= TRANS_PROGRESSIVE;
1707
1708 I915_WRITE(LPT_TRANSCONF, val);
1709 if (intel_wait_for_register(&dev_priv->uncore,
1710 LPT_TRANSCONF,
1711 TRANS_STATE_ENABLE,
1712 TRANS_STATE_ENABLE,
1713 100))
1714 DRM_ERROR("Failed to enable PCH transcoder\n");
1715 }
1716
1717 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1718 enum pipe pipe)
1719 {
1720 i915_reg_t reg;
1721 u32 val;
1722
1723 /* FDI relies on the transcoder */
1724 assert_fdi_tx_disabled(dev_priv, pipe);
1725 assert_fdi_rx_disabled(dev_priv, pipe);
1726
1727 /* Ports must be off as well */
1728 assert_pch_ports_disabled(dev_priv, pipe);
1729
1730 reg = PCH_TRANSCONF(pipe);
1731 val = I915_READ(reg);
1732 val &= ~TRANS_ENABLE;
1733 I915_WRITE(reg, val);
1734 /* wait for PCH transcoder off, transcoder state */
1735 if (intel_wait_for_register(&dev_priv->uncore,
1736 reg, TRANS_STATE_ENABLE, 0,
1737 50))
1738 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1739
1740 if (HAS_PCH_CPT(dev_priv)) {
1741 /* Workaround: Clear the timing override chicken bit again. */
1742 reg = TRANS_CHICKEN2(pipe);
1743 val = I915_READ(reg);
1744 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1745 I915_WRITE(reg, val);
1746 }
1747 }
1748
1749 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1750 {
1751 u32 val;
1752
1753 val = I915_READ(LPT_TRANSCONF);
1754 val &= ~TRANS_ENABLE;
1755 I915_WRITE(LPT_TRANSCONF, val);
1756 /* wait for PCH transcoder off, transcoder state */
1757 if (intel_wait_for_register(&dev_priv->uncore,
1758 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1759 50))
1760 DRM_ERROR("Failed to disable PCH transcoder\n");
1761
1762 /* Workaround: clear timing override bit. */
1763 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1764 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1765 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1766 }
1767
1768 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1769 {
1770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1771
1772 if (HAS_PCH_LPT(dev_priv))
1773 return PIPE_A;
1774 else
1775 return crtc->pipe;
1776 }
1777
1778 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1779 {
1780 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1781
1782 /*
1783 * On i965gm the hardware frame counter reads
1784 * zero when the TV encoder is enabled :(
1785 */
1786 if (IS_I965GM(dev_priv) &&
1787 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1788 return 0;
1789
1790 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1791 return 0xffffffff; /* full 32 bit counter */
1792 else if (INTEL_GEN(dev_priv) >= 3)
1793 return 0xffffff; /* only 24 bits of frame count */
1794 else
1795 return 0; /* Gen2 doesn't have a hardware frame counter */
1796 }
1797
1798 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1799 {
1800 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1801
1802 drm_crtc_set_max_vblank_count(&crtc->base,
1803 intel_crtc_max_vblank_count(crtc_state));
1804 drm_crtc_vblank_on(&crtc->base);
1805 }
1806
1807 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1808 {
1809 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1811 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1812 enum pipe pipe = crtc->pipe;
1813 i915_reg_t reg;
1814 u32 val;
1815
1816 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1817
1818 assert_planes_disabled(crtc);
1819
1820 /*
1821 * A pipe without a PLL won't actually be able to drive bits from
1822 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1823 * need the check.
1824 */
1825 if (HAS_GMCH(dev_priv)) {
1826 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1827 assert_dsi_pll_enabled(dev_priv);
1828 else
1829 assert_pll_enabled(dev_priv, pipe);
1830 } else {
1831 if (new_crtc_state->has_pch_encoder) {
1832 /* if driving the PCH, we need FDI enabled */
1833 assert_fdi_rx_pll_enabled(dev_priv,
1834 intel_crtc_pch_transcoder(crtc));
1835 assert_fdi_tx_pll_enabled(dev_priv,
1836 (enum pipe) cpu_transcoder);
1837 }
1838 /* FIXME: assert CPU port conditions for SNB+ */
1839 }
1840
1841 trace_intel_pipe_enable(dev_priv, pipe);
1842
1843 reg = PIPECONF(cpu_transcoder);
1844 val = I915_READ(reg);
1845 if (val & PIPECONF_ENABLE) {
1846 /* we keep both pipes enabled on 830 */
1847 WARN_ON(!IS_I830(dev_priv));
1848 return;
1849 }
1850
1851 I915_WRITE(reg, val | PIPECONF_ENABLE);
1852 POSTING_READ(reg);
1853
1854 /*
1855 * Until the pipe starts, PIPEDSL reads will return a stale value,
1856 * which causes an apparent vblank timestamp jump when PIPEDSL
1857 * resets to its proper value. That also messes up the frame count
1858 * when it's derived from the timestamps. So let's wait for the
1859 * pipe to start properly before we call drm_crtc_vblank_on().
1860 */
1861 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1862 intel_wait_for_pipe_scanline_moving(crtc);
1863 }
1864
1865 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1866 {
1867 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1869 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1870 enum pipe pipe = crtc->pipe;
1871 i915_reg_t reg;
1872 u32 val;
1873
1874 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1875
1876 /*
1877 * Make sure planes won't keep trying to pump pixels to us,
1878 * or we might hang the display.
1879 */
1880 assert_planes_disabled(crtc);
1881
1882 trace_intel_pipe_disable(dev_priv, pipe);
1883
1884 reg = PIPECONF(cpu_transcoder);
1885 val = I915_READ(reg);
1886 if ((val & PIPECONF_ENABLE) == 0)
1887 return;
1888
1889 /*
1890 * Double wide has implications for planes
1891 * so best keep it disabled when not needed.
1892 */
1893 if (old_crtc_state->double_wide)
1894 val &= ~PIPECONF_DOUBLE_WIDE;
1895
1896 /* Don't disable the pipe on i830; it keeps both pipes (and their PLLs) running */
1897 if (!IS_I830(dev_priv))
1898 val &= ~PIPECONF_ENABLE;
1899
1900 I915_WRITE(reg, val);
1901 if ((val & PIPECONF_ENABLE) == 0)
1902 intel_wait_for_pipe_off(old_crtc_state);
1903 }
1904
1905 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1906 {
1907 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1908 }
1909
1910 static unsigned int
1911 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1912 {
1913 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1914 unsigned int cpp = fb->format->cpp[color_plane];
1915
1916 switch (fb->modifier) {
1917 case DRM_FORMAT_MOD_LINEAR:
1918 return intel_tile_size(dev_priv);
1919 case I915_FORMAT_MOD_X_TILED:
1920 if (IS_GEN(dev_priv, 2))
1921 return 128;
1922 else
1923 return 512;
1924 case I915_FORMAT_MOD_Y_TILED_CCS:
1925 if (color_plane == 1)
1926 return 128;
1927 /* fall through */
1928 case I915_FORMAT_MOD_Y_TILED:
1929 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1930 return 128;
1931 else
1932 return 512;
1933 case I915_FORMAT_MOD_Yf_TILED_CCS:
1934 if (color_plane == 1)
1935 return 128;
1936 /* fall through */
1937 case I915_FORMAT_MOD_Yf_TILED:
1938 switch (cpp) {
1939 case 1:
1940 return 64;
1941 case 2:
1942 case 4:
1943 return 128;
1944 case 8:
1945 case 16:
1946 return 256;
1947 default:
1948 MISSING_CASE(cpp);
1949 return cpp;
1950 }
1951 break;
1952 default:
1953 MISSING_CASE(fb->modifier);
1954 return cpp;
1955 }
1956 }
1957
1958 static unsigned int
1959 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1960 {
1961 return intel_tile_size(to_i915(fb->dev)) /
1962 intel_tile_width_bytes(fb, color_plane);
1963 }
1964
1965 /* Return the tile dimensions in pixel units */
1966 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1967 unsigned int *tile_width,
1968 unsigned int *tile_height)
1969 {
1970 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1971 unsigned int cpp = fb->format->cpp[color_plane];
1972
1973 *tile_width = tile_width_bytes / cpp;
1974 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1975 }
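/*
 * Worked example (hypothetical fb): an X-tiled XRGB8888 fb on gen4+
 * has tile_width_bytes = 512 and cpp = 4, so intel_tile_dims()
 * yields a tile of 512 / 4 = 128 pixels by 4096 / 512 = 8 rows,
 * i.e. the classic 4KiB X-tile.
 */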
1976
1977 unsigned int
1978 intel_fb_align_height(const struct drm_framebuffer *fb,
1979 int color_plane, unsigned int height)
1980 {
1981 unsigned int tile_height = intel_tile_height(fb, color_plane);
1982
1983 return ALIGN(height, tile_height);
1984 }
1985
1986 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1987 {
1988 unsigned int size = 0;
1989 int i;
1990
1991 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1992 size += rot_info->plane[i].width * rot_info->plane[i].height;
1993
1994 return size;
1995 }
1996
1997 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1998 {
1999 unsigned int size = 0;
2000 int i;
2001
2002 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2003 size += rem_info->plane[i].width * rem_info->plane[i].height;
2004
2005 return size;
2006 }
2007
2008 static void
2009 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2010 const struct drm_framebuffer *fb,
2011 unsigned int rotation)
2012 {
2013 view->type = I915_GGTT_VIEW_NORMAL;
2014 if (drm_rotation_90_or_270(rotation)) {
2015 view->type = I915_GGTT_VIEW_ROTATED;
2016 view->rotated = to_intel_framebuffer(fb)->rot_info;
2017 }
2018 }
2019
2020 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2021 {
2022 if (IS_I830(dev_priv))
2023 return 16 * 1024;
2024 else if (IS_I85X(dev_priv))
2025 return 256;
2026 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2027 return 32;
2028 else
2029 return 4 * 1024;
2030 }
2031
2032 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2033 {
2034 if (INTEL_GEN(dev_priv) >= 9)
2035 return 256 * 1024;
2036 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2037 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2038 return 128 * 1024;
2039 else if (INTEL_GEN(dev_priv) >= 4)
2040 return 4 * 1024;
2041 else
2042 return 0;
2043 }
2044
2045 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2046 int color_plane)
2047 {
2048 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2049
2050 /* AUX_DIST needs only 4K alignment */
2051 if (color_plane == 1)
2052 return 4096;
2053
2054 switch (fb->modifier) {
2055 case DRM_FORMAT_MOD_LINEAR:
2056 return intel_linear_alignment(dev_priv);
2057 case I915_FORMAT_MOD_X_TILED:
2058 if (INTEL_GEN(dev_priv) >= 9)
2059 return 256 * 1024;
2060 return 0;
2061 case I915_FORMAT_MOD_Y_TILED_CCS:
2062 case I915_FORMAT_MOD_Yf_TILED_CCS:
2063 case I915_FORMAT_MOD_Y_TILED:
2064 case I915_FORMAT_MOD_Yf_TILED:
2065 return 1 * 1024 * 1024;
2066 default:
2067 MISSING_CASE(fb->modifier);
2068 return 0;
2069 }
2070 }
2071
2072 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073 {
2074 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
2077 return INTEL_GEN(dev_priv) < 4 ||
2078 (plane->has_fbc &&
2079 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2080 }
2081
2082 struct i915_vma *
2083 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2084 const struct i915_ggtt_view *view,
2085 bool uses_fence,
2086 unsigned long *out_flags)
2087 {
2088 struct drm_device *dev = fb->dev;
2089 struct drm_i915_private *dev_priv = to_i915(dev);
2090 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2091 intel_wakeref_t wakeref;
2092 struct i915_vma *vma;
2093 unsigned int pinctl;
2094 u32 alignment;
2095
2096 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2097
2098 alignment = intel_surf_alignment(fb, 0);
2099
2100 /* Note that the w/a also requires 64 PTEs of padding following the
2101 * bo. We currently fill all unused PTE with the shadow page and so
2102 * we should always have valid PTE following the scanout preventing
2103 * the VT-d warning.
2104 */
2105 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2106 alignment = 256 * 1024;
2107
2108 /*
2109 * Global gtt pte registers are special registers which actually forward
2110 * writes to a chunk of system memory, which means that there is no risk
2111 * that the register values disappear as soon as we call
2112 * intel_runtime_pm_put(), so it is correct to wrap only the
2113 * pin/unpin/fence and not more.
2114 */
2115 wakeref = intel_runtime_pm_get(dev_priv);
2116 i915_gem_object_lock(obj);
2117
2118 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2119
2120 pinctl = 0;
2121
2122 /* Valleyview is definitely limited to scanning out the first
2123 * 512MiB. Let's presume this behaviour was inherited from the
2124 * g4x display engine and that all earlier gen are similarly
2125 * limited. Testing suggests that it is a little more
2126 * complicated than this. For example, Cherryview appears quite
2127 * happy to scan out from anywhere within its global aperture.
2128 */
2129 if (HAS_GMCH(dev_priv))
2130 pinctl |= PIN_MAPPABLE;
2131
2132 vma = i915_gem_object_pin_to_display_plane(obj,
2133 alignment, view, pinctl);
2134 if (IS_ERR(vma))
2135 goto err;
2136
2137 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2138 int ret;
2139
2140 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2141 * fence, whereas 965+ only requires a fence if using
2142 * framebuffer compression. For simplicity, we always, when
2143 * possible, install a fence as the cost is not that onerous.
2144 *
2145 * If we fail to fence the tiled scanout, then either the
2146 * modeset will reject the change (which is highly unlikely as
2147 * the affected systems, all but one, do not have unmappable
2148 * space) or we will not be able to enable full powersaving
2149 * techniques (also likely not to apply due to various limits
2150 * FBC and the like impose on the size of the buffer, which
2151 * presumably we violated anyway with this unmappable buffer).
2152 * Anyway, it is presumably better to stumble onwards with
2153 * something and try to run the system in a "less than optimal"
2154 * mode that matches the user configuration.
2155 */
2156 ret = i915_vma_pin_fence(vma);
2157 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2158 i915_gem_object_unpin_from_display_plane(vma);
2159 vma = ERR_PTR(ret);
2160 goto err;
2161 }
2162
2163 if (ret == 0 && vma->fence)
2164 *out_flags |= PLANE_HAS_FENCE;
2165 }
2166
2167 i915_vma_get(vma);
2168 err:
2169 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2170
2171 i915_gem_object_unlock(obj);
2172 intel_runtime_pm_put(dev_priv, wakeref);
2173 return vma;
2174 }
2175
2176 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2177 {
2178 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2179
2180 i915_gem_object_lock(vma->obj);
2181 if (flags & PLANE_HAS_FENCE)
2182 i915_vma_unpin_fence(vma);
2183 i915_gem_object_unpin_from_display_plane(vma);
2184 i915_gem_object_unlock(vma->obj);
2185
2186 i915_vma_put(vma);
2187 }
2188
2189 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2190 unsigned int rotation)
2191 {
2192 if (drm_rotation_90_or_270(rotation))
2193 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2194 else
2195 return fb->pitches[color_plane];
2196 }
2197
2198 /*
2199 * Convert the x/y offsets into a linear offset.
2200 * Only valid with 0/180 degree rotation, which is fine since linear
2201 * offset is only used with linear buffers on pre-hsw and tiled buffers
2202 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2203 */
2204 u32 intel_fb_xy_to_linear(int x, int y,
2205 const struct intel_plane_state *state,
2206 int color_plane)
2207 {
2208 const struct drm_framebuffer *fb = state->base.fb;
2209 unsigned int cpp = fb->format->cpp[color_plane];
2210 unsigned int pitch = state->color_plane[color_plane].stride;
2211
2212 return y * pitch + x * cpp;
2213 }
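/*
 * Worked example (hypothetical numbers): for an XRGB8888 plane
 * (cpp = 4) with a 4096 byte stride, the pixel at (x, y) = (16, 2)
 * lands at 2 * 4096 + 16 * 4 = 8256 bytes from the start of the
 * plane.
 */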
2214
2215 /*
2216 * Add the x/y offsets derived from fb->offsets[] to the user
2217 * specified plane src x/y offsets. The resulting x/y offsets
2218 * specify the start of scanout from the beginning of the gtt mapping.
2219 */
2220 void intel_add_fb_offsets(int *x, int *y,
2221 const struct intel_plane_state *state,
2222 int color_plane)
2224 {
2225 *x += state->color_plane[color_plane].x;
2226 *y += state->color_plane[color_plane].y;
2227 }
2228
2229 static u32 intel_adjust_tile_offset(int *x, int *y,
2230 unsigned int tile_width,
2231 unsigned int tile_height,
2232 unsigned int tile_size,
2233 unsigned int pitch_tiles,
2234 u32 old_offset,
2235 u32 new_offset)
2236 {
2237 unsigned int pitch_pixels = pitch_tiles * tile_width;
2238 unsigned int tiles;
2239
2240 WARN_ON(old_offset & (tile_size - 1));
2241 WARN_ON(new_offset & (tile_size - 1));
2242 WARN_ON(new_offset > old_offset);
2243
2244 tiles = (old_offset - new_offset) / tile_size;
2245
2246 *y += tiles / pitch_tiles * tile_height;
2247 *x += tiles % pitch_tiles * tile_width;
2248
2249 /* minimize x in case it got needlessly big */
2250 *y += *x / pitch_pixels * tile_height;
2251 *x %= pitch_pixels;
2252
2253 return new_offset;
2254 }
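/*
 * Worked example (hypothetical numbers): with X-tiles of
 * 128 pixels x 8 rows (tile_size = 4096) and pitch_tiles = 8,
 * moving from old_offset = 86016 (21 tiles) to new_offset = 0
 * folds tiles = 21 back into the coordinates as
 * y += 21 / 8 * 8 = 16 and x += 21 % 8 * 128 = 640; an x of 640
 * is still below pitch_pixels = 1024, so the final minimization
 * step changes nothing.
 */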
2255
2256 static bool is_surface_linear(u64 modifier, int color_plane)
2257 {
2258 return modifier == DRM_FORMAT_MOD_LINEAR;
2259 }
2260
2261 static u32 intel_adjust_aligned_offset(int *x, int *y,
2262 const struct drm_framebuffer *fb,
2263 int color_plane,
2264 unsigned int rotation,
2265 unsigned int pitch,
2266 u32 old_offset, u32 new_offset)
2267 {
2268 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2269 unsigned int cpp = fb->format->cpp[color_plane];
2270
2271 WARN_ON(new_offset > old_offset);
2272
2273 if (!is_surface_linear(fb->modifier, color_plane)) {
2274 unsigned int tile_size, tile_width, tile_height;
2275 unsigned int pitch_tiles;
2276
2277 tile_size = intel_tile_size(dev_priv);
2278 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2279
2280 if (drm_rotation_90_or_270(rotation)) {
2281 pitch_tiles = pitch / tile_height;
2282 swap(tile_width, tile_height);
2283 } else {
2284 pitch_tiles = pitch / (tile_width * cpp);
2285 }
2286
2287 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2288 tile_size, pitch_tiles,
2289 old_offset, new_offset);
2290 } else {
2291 old_offset += *y * pitch + *x * cpp;
2292
2293 *y = (old_offset - new_offset) / pitch;
2294 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2295 }
2296
2297 return new_offset;
2298 }
2299
2300 /*
2301 * Adjust the tile offset by moving the difference into
2302 * the x/y offsets.
2303 */
2304 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2305 const struct intel_plane_state *state,
2306 int color_plane,
2307 u32 old_offset, u32 new_offset)
2308 {
2309 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2310 state->base.rotation,
2311 state->color_plane[color_plane].stride,
2312 old_offset, new_offset);
2313 }
2314
2315 /*
2316 * Computes the aligned offset to the base tile and adjusts
2317 * x, y. bytes per pixel is assumed to be a power-of-two.
2318 *
2319 * In the 90/270 rotated case, x and y are assumed
2320 * to be already rotated to match the rotated GTT view, and
2321 * pitch is the tile_height aligned framebuffer height.
2322 *
2323 * This function is used when computing the derived information
2324 * under intel_framebuffer, so using any of that information
2325 * here is not allowed. Anything under drm_framebuffer can be
2326 * used. This is why the user has to pass in the pitch since it
2327 * is specified in the rotated orientation.
2328 */
2329 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2330 int *x, int *y,
2331 const struct drm_framebuffer *fb,
2332 int color_plane,
2333 unsigned int pitch,
2334 unsigned int rotation,
2335 u32 alignment)
2336 {
2337 unsigned int cpp = fb->format->cpp[color_plane];
2338 u32 offset, offset_aligned;
2339
2340 if (alignment)
2341 alignment--;
2342
2343 if (!is_surface_linear(fb->modifier, color_plane)) {
2344 unsigned int tile_size, tile_width, tile_height;
2345 unsigned int tile_rows, tiles, pitch_tiles;
2346
2347 tile_size = intel_tile_size(dev_priv);
2348 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2349
2350 if (drm_rotation_90_or_270(rotation)) {
2351 pitch_tiles = pitch / tile_height;
2352 swap(tile_width, tile_height);
2353 } else {
2354 pitch_tiles = pitch / (tile_width * cpp);
2355 }
2356
2357 tile_rows = *y / tile_height;
2358 *y %= tile_height;
2359
2360 tiles = *x / tile_width;
2361 *x %= tile_width;
2362
2363 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2364 offset_aligned = offset & ~alignment;
2365
2366 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2367 tile_size, pitch_tiles,
2368 offset, offset_aligned);
2369 } else {
2370 offset = *y * pitch + *x * cpp;
2371 offset_aligned = offset & ~alignment;
2372
2373 *y = (offset & alignment) / pitch;
2374 *x = ((offset & alignment) - *y * pitch) / cpp;
2375 }
2376
2377 return offset_aligned;
2378 }
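/*
 * Worked example (hypothetical numbers): X-tiled, cpp = 4,
 * pitch = 8192 bytes (pitch_tiles = 16), tiles of 128 pixels x
 * 8 rows, alignment = 256KiB, (x, y) = (200, 35). The tile split
 * gives tile_rows = 4, tiles = 1, remainder (x, y) = (72, 3) and
 * offset = (4 * 16 + 1) * 4096 = 266240. Rounding down to the
 * alignment yields offset_aligned = 262144, and the one leftover
 * tile is folded back into x by intel_adjust_tile_offset(), so we
 * return 262144 with (x, y) = (200, 3).
 */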
2379
2380 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2381 const struct intel_plane_state *state,
2382 int color_plane)
2383 {
2384 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2385 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2386 const struct drm_framebuffer *fb = state->base.fb;
2387 unsigned int rotation = state->base.rotation;
2388 int pitch = state->color_plane[color_plane].stride;
2389 u32 alignment;
2390
2391 if (intel_plane->id == PLANE_CURSOR)
2392 alignment = intel_cursor_alignment(dev_priv);
2393 else
2394 alignment = intel_surf_alignment(fb, color_plane);
2395
2396 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2397 pitch, rotation, alignment);
2398 }
2399
2400 /* Convert the fb->offset[] into x/y offsets */
2401 static int intel_fb_offset_to_xy(int *x, int *y,
2402 const struct drm_framebuffer *fb,
2403 int color_plane)
2404 {
2405 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2406 unsigned int height;
2407
2408 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2409 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2410 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2411 fb->offsets[color_plane], color_plane);
2412 return -EINVAL;
2413 }
2414
2415 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2416 height = ALIGN(height, intel_tile_height(fb, color_plane));
2417
2418 /* Catch potential overflows early */
2419 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2420 fb->offsets[color_plane])) {
2421 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2422 fb->offsets[color_plane], fb->pitches[color_plane],
2423 color_plane);
2424 return -ERANGE;
2425 }
2426
2427 *x = 0;
2428 *y = 0;
2429
2430 intel_adjust_aligned_offset(x, y,
2431 fb, color_plane, DRM_MODE_ROTATE_0,
2432 fb->pitches[color_plane],
2433 fb->offsets[color_plane], 0);
2434
2435 return 0;
2436 }
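/*
 * E.g. (hypothetical numbers) a linear XRGB8888 plane with
 * fb->offsets[0] = 8256 and a 4096 byte pitch decomposes into
 * y = 8256 / 4096 = 2 and x = (8256 - 2 * 4096) / 4 = 16, the
 * inverse of intel_fb_xy_to_linear().
 */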
2437
2438 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2439 {
2440 switch (fb_modifier) {
2441 case I915_FORMAT_MOD_X_TILED:
2442 return I915_TILING_X;
2443 case I915_FORMAT_MOD_Y_TILED:
2444 case I915_FORMAT_MOD_Y_TILED_CCS:
2445 return I915_TILING_Y;
2446 default:
2447 return I915_TILING_NONE;
2448 }
2449 }
2450
2451 /*
2452 * From the Sky Lake PRM:
2453 * "The Color Control Surface (CCS) contains the compression status of
2454 * the cache-line pairs. The compression state of the cache-line pair
2455 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2456 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2457 * cache-line-pairs. CCS is always Y tiled."
2458 *
2459 * Since cache line pairs refers to horizontally adjacent cache lines,
2460 * each cache line in the CCS corresponds to an area of 32x16 cache
2461 * lines on the main surface. Since each pixel is 4 bytes, this gives
2462 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2463 * main surface.
2464 */
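/*
 * The 8x16 ratio can be double checked from the tiling layout: a
 * cache-line-pair is 2 horizontally adjacent 64 byte cache lines,
 * so one CCS cache line covers 32x16 main surface cache lines. A
 * cache line within a Y-tile spans 16 bytes x 4 rows, i.e. 4x4
 * pixels at 4 bytes per pixel, so the covered area is 128x64
 * pixels; at 64 bytes per CCS cache line that is 128 * 64 / 64 =
 * 128 pixels per CCS byte, matching .hsub = 8, .vsub = 16 below.
 */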
2465 static const struct drm_format_info ccs_formats[] = {
2466 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2467 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2468 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2469 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2470 };
2471
2472 static const struct drm_format_info *
2473 lookup_format_info(const struct drm_format_info formats[],
2474 int num_formats, u32 format)
2475 {
2476 int i;
2477
2478 for (i = 0; i < num_formats; i++) {
2479 if (formats[i].format == format)
2480 return &formats[i];
2481 }
2482
2483 return NULL;
2484 }
2485
2486 static const struct drm_format_info *
2487 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2488 {
2489 switch (cmd->modifier[0]) {
2490 case I915_FORMAT_MOD_Y_TILED_CCS:
2491 case I915_FORMAT_MOD_Yf_TILED_CCS:
2492 return lookup_format_info(ccs_formats,
2493 ARRAY_SIZE(ccs_formats),
2494 cmd->pixel_format);
2495 default:
2496 return NULL;
2497 }
2498 }
2499
2500 bool is_ccs_modifier(u64 modifier)
2501 {
2502 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2503 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2504 }
2505
2506 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2507 u32 pixel_format, u64 modifier)
2508 {
2509 struct intel_crtc *crtc;
2510 struct intel_plane *plane;
2511
2512 /*
2513 * We assume the primary plane for pipe A has
2514 * the highest stride limits of them all.
2515 */
2516 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2517 plane = to_intel_plane(crtc->base.primary);
2518
2519 return plane->max_stride(plane, pixel_format, modifier,
2520 DRM_MODE_ROTATE_0);
2521 }
2522
2523 static
2524 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2525 u32 pixel_format, u64 modifier)
2526 {
2527 /*
2528 * Arbitrary limit for gen4+ chosen to match the
2529 * render engine max stride.
2530 *
2531 * The new CCS hash mode makes remapping impossible
2532 */
2533 if (!is_ccs_modifier(modifier)) {
2534 if (INTEL_GEN(dev_priv) >= 7)
2535 return 256*1024;
2536 else if (INTEL_GEN(dev_priv) >= 4)
2537 return 128*1024;
2538 }
2539
2540 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2541 }
2542
2543 static u32
2544 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2545 {
2546 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2547
2548 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2549 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2550 fb->format->format,
2551 fb->modifier);
2552
2553 /*
2554 * To make remapping with linear generally feasible
2555 * we need the stride to be page aligned.
2556 */
2557 if (fb->pitches[color_plane] > max_stride)
2558 return intel_tile_size(dev_priv);
2559 else
2560 return 64;
2561 } else {
2562 return intel_tile_width_bytes(fb, color_plane);
2563 }
2564 }
2565
2566 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2567 {
2568 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2569 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2570 const struct drm_framebuffer *fb = plane_state->base.fb;
2571 int i;
2572
2573 /* We don't want to deal with remapping for cursors */
2574 if (plane->id == PLANE_CURSOR)
2575 return false;
2576
2577 /*
2578 * The display engine limits already match/exceed the
2579 * render engine limits, so not much point in remapping.
2580 * Would also need to deal with the fence POT alignment
2581 * and gen2 2KiB GTT tile size.
2582 */
2583 if (INTEL_GEN(dev_priv) < 4)
2584 return false;
2585
2586 /*
2587 * The new CCS hash mode isn't compatible with remapping as
2588 * the virtual address of the pages affects the compressed data.
2589 */
2590 if (is_ccs_modifier(fb->modifier))
2591 return false;
2592
2593 /* Linear needs a page aligned stride for remapping */
2594 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2595 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2596
2597 for (i = 0; i < fb->format->num_planes; i++) {
2598 if (fb->pitches[i] & alignment)
2599 return false;
2600 }
2601 }
2602
2603 return true;
2604 }
2605
2606 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2607 {
2608 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2609 const struct drm_framebuffer *fb = plane_state->base.fb;
2610 unsigned int rotation = plane_state->base.rotation;
2611 u32 stride, max_stride;
2612
2613 /*
2614 * No remapping for invisible planes since we don't have
2615 * an actual source viewport to remap.
2616 */
2617 if (!plane_state->base.visible)
2618 return false;
2619
2620 if (!intel_plane_can_remap(plane_state))
2621 return false;
2622
2623 /*
2624 * FIXME: aux plane limits on gen9+ are
2625 * unclear in Bspec, for now no checking.
2626 */
2627 stride = intel_fb_pitch(fb, 0, rotation);
2628 max_stride = plane->max_stride(plane, fb->format->format,
2629 fb->modifier, rotation);
2630
2631 return stride > max_stride;
2632 }
2633
2634 static int
2635 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2636 struct drm_framebuffer *fb)
2637 {
2638 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2639 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2640 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2641 u32 gtt_offset_rotated = 0;
2642 unsigned int max_size = 0;
2643 int i, num_planes = fb->format->num_planes;
2644 unsigned int tile_size = intel_tile_size(dev_priv);
2645
2646 for (i = 0; i < num_planes; i++) {
2647 unsigned int width, height;
2648 unsigned int cpp, size;
2649 u32 offset;
2650 int x, y;
2651 int ret;
2652
2653 cpp = fb->format->cpp[i];
2654 width = drm_framebuffer_plane_width(fb->width, fb, i);
2655 height = drm_framebuffer_plane_height(fb->height, fb, i);
2656
2657 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2658 if (ret) {
2659 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2660 i, fb->offsets[i]);
2661 return ret;
2662 }
2663
2664 if (is_ccs_modifier(fb->modifier) && i == 1) {
2665 int hsub = fb->format->hsub;
2666 int vsub = fb->format->vsub;
2667 int tile_width, tile_height;
2668 int main_x, main_y;
2669 int ccs_x, ccs_y;
2670
2671 intel_tile_dims(fb, i, &tile_width, &tile_height);
2672 tile_width *= hsub;
2673 tile_height *= vsub;
2674
2675 ccs_x = (x * hsub) % tile_width;
2676 ccs_y = (y * vsub) % tile_height;
2677 main_x = intel_fb->normal[0].x % tile_width;
2678 main_y = intel_fb->normal[0].y % tile_height;
2679
2680 /*
2681 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2682 * x/y offsets must match between CCS and the main surface.
2683 */
2684 if (main_x != ccs_x || main_y != ccs_y) {
2685 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2686 main_x, main_y,
2687 ccs_x, ccs_y,
2688 intel_fb->normal[0].x,
2689 intel_fb->normal[0].y,
2690 x, y);
2691 return -EINVAL;
2692 }
2693 }
2694
2695 /*
2696 * The fence (if used) is aligned to the start of the object
2697 * so having the framebuffer wrap around across the edge of the
2698 * fenced region doesn't really work. We have no API to configure
2699 * the fence start offset within the object (nor could we probably
2700 * on gen2/3). So it's just easier if we just require that the
2701 * fb layout agrees with the fence layout. We already check that the
2702 * fb stride matches the fence stride elsewhere.
2703 */
2704 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2705 (x + width) * cpp > fb->pitches[i]) {
2706 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2707 i, fb->offsets[i]);
2708 return -EINVAL;
2709 }
2710
2711 /*
2712 * First pixel of the framebuffer from
2713 * the start of the normal gtt mapping.
2714 */
2715 intel_fb->normal[i].x = x;
2716 intel_fb->normal[i].y = y;
2717
2718 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2719 fb->pitches[i],
2720 DRM_MODE_ROTATE_0,
2721 tile_size);
2722 offset /= tile_size;
2723
2724 if (!is_surface_linear(fb->modifier, i)) {
2725 unsigned int tile_width, tile_height;
2726 unsigned int pitch_tiles;
2727 struct drm_rect r;
2728
2729 intel_tile_dims(fb, i, &tile_width, &tile_height);
2730
2731 rot_info->plane[i].offset = offset;
2732 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2733 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2734 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2735
2736 intel_fb->rotated[i].pitch =
2737 rot_info->plane[i].height * tile_height;
2738
2739 /* how many tiles does this plane need */
2740 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2741 /*
2742 * If the plane isn't horizontally tile aligned,
2743 * we need one more tile.
2744 */
2745 if (x != 0)
2746 size++;
2747
2748 /* rotate the x/y offsets to match the GTT view */
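/*
 * (The rotated view hands the hardware a 90 degree rotated image,
 * so fb coordinates are mapped into view coordinates with the
 * inverse transform, hence DRM_MODE_ROTATE_270.)
 */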
2749 r.x1 = x;
2750 r.y1 = y;
2751 r.x2 = x + width;
2752 r.y2 = y + height;
2753 drm_rect_rotate(&r,
2754 rot_info->plane[i].width * tile_width,
2755 rot_info->plane[i].height * tile_height,
2756 DRM_MODE_ROTATE_270);
2757 x = r.x1;
2758 y = r.y1;
2759
2760 /* rotate the tile dimensions to match the GTT view */
2761 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2762 swap(tile_width, tile_height);
2763
2764 /*
2765 * We only keep the x/y offsets, so push all of the
2766 * gtt offset into the x/y offsets.
2767 */
2768 intel_adjust_tile_offset(&x, &y,
2769 tile_width, tile_height,
2770 tile_size, pitch_tiles,
2771 gtt_offset_rotated * tile_size, 0);
2772
2773 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2774
2775 /*
2776 * First pixel of the framebuffer from
2777 * the start of the rotated gtt mapping.
2778 */
2779 intel_fb->rotated[i].x = x;
2780 intel_fb->rotated[i].y = y;
2781 } else {
2782 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2783 x * cpp, tile_size);
2784 }
2785
2786 /* how many tiles in total needed in the bo */
2787 max_size = max(max_size, offset + size);
2788 }
2789
2790 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2791 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2792 mul_u32_u32(max_size, tile_size), obj->base.size);
2793 return -EINVAL;
2794 }
2795
2796 return 0;
2797 }
2798
2799 static void
2800 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2801 {
2802 struct drm_i915_private *dev_priv =
2803 to_i915(plane_state->base.plane->dev);
2804 struct drm_framebuffer *fb = plane_state->base.fb;
2805 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2806 struct intel_rotation_info *info = &plane_state->view.rotated;
2807 unsigned int rotation = plane_state->base.rotation;
2808 int i, num_planes = fb->format->num_planes;
2809 unsigned int tile_size = intel_tile_size(dev_priv);
2810 unsigned int src_x, src_y;
2811 unsigned int src_w, src_h;
2812 u32 gtt_offset = 0;
2813
2814 memset(&plane_state->view, 0, sizeof(plane_state->view));
2815 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2816 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2817
2818 src_x = plane_state->base.src.x1 >> 16;
2819 src_y = plane_state->base.src.y1 >> 16;
2820 src_w = drm_rect_width(&plane_state->base.src) >> 16;
2821 src_h = drm_rect_height(&plane_state->base.src) >> 16;
2822
2823 WARN_ON(is_ccs_modifier(fb->modifier));
2824
2825 /* Make src coordinates relative to the viewport */
2826 drm_rect_translate(&plane_state->base.src,
2827 -(src_x << 16), -(src_y << 16));
2828
2829 /* Rotate src coordinates to match rotated GTT view */
2830 if (drm_rotation_90_or_270(rotation))
2831 drm_rect_rotate(&plane_state->base.src,
2832 src_w << 16, src_h << 16,
2833 DRM_MODE_ROTATE_270);
2834
2835 for (i = 0; i < num_planes; i++) {
2836 unsigned int hsub = i ? fb->format->hsub : 1;
2837 unsigned int vsub = i ? fb->format->vsub : 1;
2838 unsigned int cpp = fb->format->cpp[i];
2839 unsigned int tile_width, tile_height;
2840 unsigned int width, height;
2841 unsigned int pitch_tiles;
2842 unsigned int x, y;
2843 u32 offset;
2844
2845 intel_tile_dims(fb, i, &tile_width, &tile_height);
2846
2847 x = src_x / hsub;
2848 y = src_y / vsub;
2849 width = src_w / hsub;
2850 height = src_h / vsub;
2851
2852 /*
2853 * First pixel of the src viewport from the
2854 * start of the normal gtt mapping.
2855 */
2856 x += intel_fb->normal[i].x;
2857 y += intel_fb->normal[i].y;
2858
2859 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2860 fb, i, fb->pitches[i],
2861 DRM_MODE_ROTATE_0, tile_size);
2862 offset /= tile_size;
2863
2864 info->plane[i].offset = offset;
2865 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2866 tile_width * cpp);
2867 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2868 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2869
2870 if (drm_rotation_90_or_270(rotation)) {
2871 struct drm_rect r;
2872
2873 /* rotate the x/y offsets to match the GTT view */
2874 r.x1 = x;
2875 r.y1 = y;
2876 r.x2 = x + width;
2877 r.y2 = y + height;
2878 drm_rect_rotate(&r,
2879 info->plane[i].width * tile_width,
2880 info->plane[i].height * tile_height,
2881 DRM_MODE_ROTATE_270);
2882 x = r.x1;
2883 y = r.y1;
2884
2885 pitch_tiles = info->plane[i].height;
2886 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2887
2888 /* rotate the tile dimensions to match the GTT view */
2889 swap(tile_width, tile_height);
2890 } else {
2891 pitch_tiles = info->plane[i].width;
2892 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2893 }
2894
2895 /*
2896 * We only keep the x/y offsets, so push all of the
2897 * gtt offset into the x/y offsets.
2898 */
2899 intel_adjust_tile_offset(&x, &y,
2900 tile_width, tile_height,
2901 tile_size, pitch_tiles,
2902 gtt_offset * tile_size, 0);
2903
2904 gtt_offset += info->plane[i].width * info->plane[i].height;
2905
2906 plane_state->color_plane[i].offset = 0;
2907 plane_state->color_plane[i].x = x;
2908 plane_state->color_plane[i].y = y;
2909 }
2910 }
2911
2912 static int
2913 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2914 {
2915 const struct intel_framebuffer *fb =
2916 to_intel_framebuffer(plane_state->base.fb);
2917 unsigned int rotation = plane_state->base.rotation;
2918 int i, num_planes;
2919
2920 if (!fb)
2921 return 0;
2922
2923 num_planes = fb->base.format->num_planes;
2924
2925 if (intel_plane_needs_remap(plane_state)) {
2926 intel_plane_remap_gtt(plane_state);
2927
2928 /*
2929 * Sometimes even remapping can't overcome
2930 * the stride limitations :( This can happen with
2931 * big plane sizes and suitably misaligned
2932 * offsets.
2933 */
2934 return intel_plane_check_stride(plane_state);
2935 }
2936
2937 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2938
2939 for (i = 0; i < num_planes; i++) {
2940 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2941 plane_state->color_plane[i].offset = 0;
2942
2943 if (drm_rotation_90_or_270(rotation)) {
2944 plane_state->color_plane[i].x = fb->rotated[i].x;
2945 plane_state->color_plane[i].y = fb->rotated[i].y;
2946 } else {
2947 plane_state->color_plane[i].x = fb->normal[i].x;
2948 plane_state->color_plane[i].y = fb->normal[i].y;
2949 }
2950 }
2951
2952 /* Rotate src coordinates to match rotated GTT view */
2953 if (drm_rotation_90_or_270(rotation))
2954 drm_rect_rotate(&plane_state->base.src,
2955 fb->base.width << 16, fb->base.height << 16,
2956 DRM_MODE_ROTATE_270);
2957
2958 return intel_plane_check_stride(plane_state);
2959 }
2960
2961 static int i9xx_format_to_fourcc(int format)
2962 {
2963 switch (format) {
2964 case DISPPLANE_8BPP:
2965 return DRM_FORMAT_C8;
2966 case DISPPLANE_BGRX555:
2967 return DRM_FORMAT_XRGB1555;
2968 case DISPPLANE_BGRX565:
2969 return DRM_FORMAT_RGB565;
2970 default:
2971 case DISPPLANE_BGRX888:
2972 return DRM_FORMAT_XRGB8888;
2973 case DISPPLANE_RGBX888:
2974 return DRM_FORMAT_XBGR8888;
2975 case DISPPLANE_BGRX101010:
2976 return DRM_FORMAT_XRGB2101010;
2977 case DISPPLANE_RGBX101010:
2978 return DRM_FORMAT_XBGR2101010;
2979 }
2980 }
2981
2982 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2983 {
2984 switch (format) {
2985 case PLANE_CTL_FORMAT_RGB_565:
2986 return DRM_FORMAT_RGB565;
2987 case PLANE_CTL_FORMAT_NV12:
2988 return DRM_FORMAT_NV12;
2989 case PLANE_CTL_FORMAT_P010:
2990 return DRM_FORMAT_P010;
2991 case PLANE_CTL_FORMAT_P012:
2992 return DRM_FORMAT_P012;
2993 case PLANE_CTL_FORMAT_P016:
2994 return DRM_FORMAT_P016;
2995 case PLANE_CTL_FORMAT_Y210:
2996 return DRM_FORMAT_Y210;
2997 case PLANE_CTL_FORMAT_Y212:
2998 return DRM_FORMAT_Y212;
2999 case PLANE_CTL_FORMAT_Y216:
3000 return DRM_FORMAT_Y216;
3001 case PLANE_CTL_FORMAT_Y410:
3002 return DRM_FORMAT_XVYU2101010;
3003 case PLANE_CTL_FORMAT_Y412:
3004 return DRM_FORMAT_XVYU12_16161616;
3005 case PLANE_CTL_FORMAT_Y416:
3006 return DRM_FORMAT_XVYU16161616;
3007 default:
3008 case PLANE_CTL_FORMAT_XRGB_8888:
3009 if (rgb_order) {
3010 if (alpha)
3011 return DRM_FORMAT_ABGR8888;
3012 else
3013 return DRM_FORMAT_XBGR8888;
3014 } else {
3015 if (alpha)
3016 return DRM_FORMAT_ARGB8888;
3017 else
3018 return DRM_FORMAT_XRGB8888;
3019 }
3020 case PLANE_CTL_FORMAT_XRGB_2101010:
3021 if (rgb_order)
3022 return DRM_FORMAT_XBGR2101010;
3023 else
3024 return DRM_FORMAT_XRGB2101010;
3025 case PLANE_CTL_FORMAT_XRGB_16161616F:
3026 if (rgb_order) {
3027 if (alpha)
3028 return DRM_FORMAT_ABGR16161616F;
3029 else
3030 return DRM_FORMAT_XBGR16161616F;
3031 } else {
3032 if (alpha)
3033 return DRM_FORMAT_ARGB16161616F;
3034 else
3035 return DRM_FORMAT_XRGB16161616F;
3036 }
3037 }
3038 }
3039
3040 static bool
3041 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3042 struct intel_initial_plane_config *plane_config)
3043 {
3044 struct drm_device *dev = crtc->base.dev;
3045 struct drm_i915_private *dev_priv = to_i915(dev);
3046 struct drm_i915_gem_object *obj = NULL;
3047 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3048 struct drm_framebuffer *fb = &plane_config->fb->base;
3049 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3050 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3051 PAGE_SIZE);
3052
3053 size_aligned -= base_aligned;
3054
3055 if (plane_config->size == 0)
3056 return false;
3057
3058 /* If the FB is too big, just don't use it since fbdev is not very
3059 * important and we should probably use that space with FBC or other
3060 * features. */
3061 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3062 return false;
3063
3064 switch (fb->modifier) {
3065 case DRM_FORMAT_MOD_LINEAR:
3066 case I915_FORMAT_MOD_X_TILED:
3067 case I915_FORMAT_MOD_Y_TILED:
3068 break;
3069 default:
3070 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3071 fb->modifier);
3072 return false;
3073 }
3074
3075 mutex_lock(&dev->struct_mutex);
3076 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3077 base_aligned,
3078 base_aligned,
3079 size_aligned);
3080 mutex_unlock(&dev->struct_mutex);
3081 if (!obj)
3082 return false;
3083
3084 switch (plane_config->tiling) {
3085 case I915_TILING_NONE:
3086 break;
3087 case I915_TILING_X:
3088 case I915_TILING_Y:
3089 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3090 break;
3091 default:
3092 MISSING_CASE(plane_config->tiling);
3093 return false;
3094 }
3095
3096 mode_cmd.pixel_format = fb->format->format;
3097 mode_cmd.width = fb->width;
3098 mode_cmd.height = fb->height;
3099 mode_cmd.pitches[0] = fb->pitches[0];
3100 mode_cmd.modifier[0] = fb->modifier;
3101 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3102
3103 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3104 DRM_DEBUG_KMS("intel fb init failed\n");
3105 goto out_unref_obj;
3106 }
3107
3109 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3110 return true;
3111
3112 out_unref_obj:
3113 i915_gem_object_put(obj);
3114 return false;
3115 }
3116
3117 static void
3118 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3119 struct intel_plane_state *plane_state,
3120 bool visible)
3121 {
3122 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3123
3124 plane_state->base.visible = visible;
3125
3126 if (visible)
3127 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3128 else
3129 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3130 }
3131
3132 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3133 {
3134 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3135 struct drm_plane *plane;
3136
3137 /*
3138 * Active_planes aliases if multiple "primary" or cursor planes
3139 * have been used on the same (or wrong) pipe. plane_mask uses
3140 * unique ids, hence we can use that to reconstruct active_planes.
3141 */
3142 crtc_state->active_planes = 0;
3143
3144 drm_for_each_plane_mask(plane, &dev_priv->drm,
3145 crtc_state->base.plane_mask)
3146 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3147 }
3148
3149 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3150 struct intel_plane *plane)
3151 {
3152 struct intel_crtc_state *crtc_state =
3153 to_intel_crtc_state(crtc->base.state);
3154 struct intel_plane_state *plane_state =
3155 to_intel_plane_state(plane->base.state);
3156
3157 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3158 plane->base.base.id, plane->base.name,
3159 crtc->base.base.id, crtc->base.name);
3160
3161 intel_set_plane_visible(crtc_state, plane_state, false);
3162 fixup_active_planes(crtc_state);
3163 crtc_state->data_rate[plane->id] = 0;
3164
3165 if (plane->id == PLANE_PRIMARY)
3166 intel_pre_disable_primary_noatomic(&crtc->base);
3167
3168 intel_disable_plane(plane, crtc_state);
3169 }
3170
3171 static void
3172 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3173 struct intel_initial_plane_config *plane_config)
3174 {
3175 struct drm_device *dev = intel_crtc->base.dev;
3176 struct drm_i915_private *dev_priv = to_i915(dev);
3177 struct drm_crtc *c;
3178 struct drm_i915_gem_object *obj;
3179 struct drm_plane *primary = intel_crtc->base.primary;
3180 struct drm_plane_state *plane_state = primary->state;
3181 struct intel_plane *intel_plane = to_intel_plane(primary);
3182 struct intel_plane_state *intel_state =
3183 to_intel_plane_state(plane_state);
3184 struct drm_framebuffer *fb;
3185
3186 if (!plane_config->fb)
3187 return;
3188
3189 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3190 fb = &plane_config->fb->base;
3191 goto valid_fb;
3192 }
3193
3194 kfree(plane_config->fb);
3195
3196 /*
3197 * Failed to alloc the obj, check to see if we should share
3198 * an fb with another CRTC instead
3199 */
3200 for_each_crtc(dev, c) {
3201 struct intel_plane_state *state;
3202
3203 if (c == &intel_crtc->base)
3204 continue;
3205
3206 if (!to_intel_crtc(c)->active)
3207 continue;
3208
3209 state = to_intel_plane_state(c->primary->state);
3210 if (!state->vma)
3211 continue;
3212
3213 if (intel_plane_ggtt_offset(state) == plane_config->base) {
3214 fb = state->base.fb;
3215 drm_framebuffer_get(fb);
3216 goto valid_fb;
3217 }
3218 }
3219
3220 /*
3221 * We've failed to reconstruct the BIOS FB. Current display state
3222 * indicates that the primary plane is visible, but has a NULL FB,
3223 * which will lead to problems later if we don't fix it up. The
3224 * simplest solution is to just disable the primary plane now and
3225 * pretend the BIOS never had it enabled.
3226 */
3227 intel_plane_disable_noatomic(intel_crtc, intel_plane);
3228
3229 return;
3230
3231 valid_fb:
3232 intel_state->base.rotation = plane_config->rotation;
3233 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3234 intel_state->base.rotation);
3235 intel_state->color_plane[0].stride =
3236 intel_fb_pitch(fb, 0, intel_state->base.rotation);
3237
3238 mutex_lock(&dev->struct_mutex);
3239 intel_state->vma =
3240 intel_pin_and_fence_fb_obj(fb,
3241 &intel_state->view,
3242 intel_plane_uses_fence(intel_state),
3243 &intel_state->flags);
3244 mutex_unlock(&dev->struct_mutex);
3245 if (IS_ERR(intel_state->vma)) {
3246 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3247 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3248
3249 intel_state->vma = NULL;
3250 drm_framebuffer_put(fb);
3251 return;
3252 }
3253
3254 obj = intel_fb_obj(fb);
3255 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
3256
3257 plane_state->src_x = 0;
3258 plane_state->src_y = 0;
3259 plane_state->src_w = fb->width << 16;
3260 plane_state->src_h = fb->height << 16;
3261
3262 plane_state->crtc_x = 0;
3263 plane_state->crtc_y = 0;
3264 plane_state->crtc_w = fb->width;
3265 plane_state->crtc_h = fb->height;
3266
3267 intel_state->base.src = drm_plane_state_src(plane_state);
3268 intel_state->base.dst = drm_plane_state_dest(plane_state);
3269
3270 if (i915_gem_object_is_tiled(obj))
3271 dev_priv->preserve_bios_swizzle = true;
3272
3273 plane_state->fb = fb;
3274 plane_state->crtc = &intel_crtc->base;
3275
3276 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3277 &obj->frontbuffer_bits);
3278 }
3279
3280 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3281 int color_plane,
3282 unsigned int rotation)
3283 {
3284 int cpp = fb->format->cpp[color_plane];
3285
3286 switch (fb->modifier) {
3287 case DRM_FORMAT_MOD_LINEAR:
3288 case I915_FORMAT_MOD_X_TILED:
3289 return 4096;
3290 case I915_FORMAT_MOD_Y_TILED_CCS:
3291 case I915_FORMAT_MOD_Yf_TILED_CCS:
3292 /* FIXME AUX plane? */
3293 case I915_FORMAT_MOD_Y_TILED:
3294 case I915_FORMAT_MOD_Yf_TILED:
3295 if (cpp == 8)
3296 return 2048;
3297 else
3298 return 4096;
3299 default:
3300 MISSING_CASE(fb->modifier);
3301 return 2048;
3302 }
3303 }
3304
3305 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3306 int color_plane,
3307 unsigned int rotation)
3308 {
3309 int cpp = fb->format->cpp[color_plane];
3310
3311 switch (fb->modifier) {
3312 case DRM_FORMAT_MOD_LINEAR:
3313 case I915_FORMAT_MOD_X_TILED:
3314 if (cpp == 8)
3315 return 4096;
3316 else
3317 return 5120;
3318 case I915_FORMAT_MOD_Y_TILED_CCS:
3319 case I915_FORMAT_MOD_Yf_TILED_CCS:
3320 /* FIXME AUX plane? */
3321 case I915_FORMAT_MOD_Y_TILED:
3322 case I915_FORMAT_MOD_Yf_TILED:
3323 if (cpp == 8)
3324 return 2048;
3325 else
3326 return 5120;
3327 default:
3328 MISSING_CASE(fb->modifier);
3329 return 2048;
3330 }
3331 }
3332
3333 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3334 int color_plane,
3335 unsigned int rotation)
3336 {
3337 return 5120;
3338 }
3339
3340 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3341 int main_x, int main_y, u32 main_offset)
3342 {
3343 const struct drm_framebuffer *fb = plane_state->base.fb;
3344 int hsub = fb->format->hsub;
3345 int vsub = fb->format->vsub;
3346 int aux_x = plane_state->color_plane[1].x;
3347 int aux_y = plane_state->color_plane[1].y;
3348 u32 aux_offset = plane_state->color_plane[1].offset;
3349 u32 alignment = intel_surf_alignment(fb, 1);
3350
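/*
 * Walk the AUX offset down one alignment step at a time, folding
 * each step into larger x/y offsets, until the intra-tile x/y of
 * the AUX surface matches the main surface (or we run out of
 * room).
 */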
3351 while (aux_offset >= main_offset && aux_y <= main_y) {
3352 int x, y;
3353
3354 if (aux_x == main_x && aux_y == main_y)
3355 break;
3356
3357 if (aux_offset == 0)
3358 break;
3359
3360 x = aux_x / hsub;
3361 y = aux_y / vsub;
3362 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3363 aux_offset, aux_offset - alignment);
3364 aux_x = x * hsub + aux_x % hsub;
3365 aux_y = y * vsub + aux_y % vsub;
3366 }
3367
3368 if (aux_x != main_x || aux_y != main_y)
3369 return false;
3370
3371 plane_state->color_plane[1].offset = aux_offset;
3372 plane_state->color_plane[1].x = aux_x;
3373 plane_state->color_plane[1].y = aux_y;
3374
3375 return true;
3376 }
3377
3378 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3379 {
3380 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
3381 const struct drm_framebuffer *fb = plane_state->base.fb;
3382 unsigned int rotation = plane_state->base.rotation;
3383 int x = plane_state->base.src.x1 >> 16;
3384 int y = plane_state->base.src.y1 >> 16;
3385 int w = drm_rect_width(&plane_state->base.src) >> 16;
3386 int h = drm_rect_height(&plane_state->base.src) >> 16;
3387 int max_width;
3388 int max_height = 4096;
3389 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3390
3391 if (INTEL_GEN(dev_priv) >= 11)
3392 max_width = icl_max_plane_width(fb, 0, rotation);
3393 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3394 max_width = glk_max_plane_width(fb, 0, rotation);
3395 else
3396 max_width = skl_max_plane_width(fb, 0, rotation);
3397
3398 if (w > max_width || h > max_height) {
3399 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3400 w, h, max_width, max_height);
3401 return -EINVAL;
3402 }
3403
3404 intel_add_fb_offsets(&x, &y, plane_state, 0);
3405 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3406 alignment = intel_surf_alignment(fb, 0);
3407
3408 /*
3409 * AUX surface offset is specified as the distance from the
3410 * main surface offset, and it must be non-negative. Make
3411 * sure that is what we will get.
3412 */
3413 if (offset > aux_offset)
3414 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3415 offset, aux_offset & ~(alignment - 1));
3416
3417 /*
3418 * When using an X-tiled surface, the plane blows up
3419 * if the x offset + width exceed the stride.
3420 *
3421 * TODO: linear and Y-tiled seem fine, Yf untested.
3422 */
3423 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3424 int cpp = fb->format->cpp[0];
3425
3426 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3427 if (offset == 0) {
3428 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3429 return -EINVAL;
3430 }
3431
3432 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3433 offset, offset - alignment);
3434 }
3435 }
3436
3437 /*
3438 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3439 * they match with the main surface x/y offsets.
3440 */
3441 if (is_ccs_modifier(fb->modifier)) {
3442 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3443 if (offset == 0)
3444 break;
3445
3446 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3447 offset, offset - alignment);
3448 }
3449
3450 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3451 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3452 return -EINVAL;
3453 }
3454 }
3455
3456 plane_state->color_plane[0].offset = offset;
3457 plane_state->color_plane[0].x = x;
3458 plane_state->color_plane[0].y = y;
3459
3460 /*
3461 * Put the final coordinates back so that the src
3462 * coordinate checks will see the right values.
3463 */
3464 drm_rect_translate(&plane_state->base.src,
3465 (x << 16) - plane_state->base.src.x1,
3466 (y << 16) - plane_state->base.src.y1);
3467
3468 return 0;
3469 }
3470
3471 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3472 {
3473 const struct drm_framebuffer *fb = plane_state->base.fb;
3474 unsigned int rotation = plane_state->base.rotation;
3475 int max_width = skl_max_plane_width(fb, 1, rotation);
3476 int max_height = 4096;
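/*
 * src coordinates are 16.16 fixed point; >> 17 converts to integer
 * pixels and applies the 2x chroma subsampling of the planar
 * formats in a single shift.
 */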
3477 int x = plane_state->base.src.x1 >> 17;
3478 int y = plane_state->base.src.y1 >> 17;
3479 int w = drm_rect_width(&plane_state->base.src) >> 17;
3480 int h = drm_rect_height(&plane_state->base.src) >> 17;
3481 u32 offset;
3482
3483 intel_add_fb_offsets(&x, &y, plane_state, 1);
3484 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3485
3486 /* FIXME not quite sure how/if these apply to the chroma plane */
3487 if (w > max_width || h > max_height) {
3488 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3489 w, h, max_width, max_height);
3490 return -EINVAL;
3491 }
3492
3493 plane_state->color_plane[1].offset = offset;
3494 plane_state->color_plane[1].x = x;
3495 plane_state->color_plane[1].y = y;
3496
3497 return 0;
3498 }
3499
3500 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3501 {
3502 const struct drm_framebuffer *fb = plane_state->base.fb;
3503 int src_x = plane_state->base.src.x1 >> 16;
3504 int src_y = plane_state->base.src.y1 >> 16;
3505 int hsub = fb->format->hsub;
3506 int vsub = fb->format->vsub;
3507 int x = src_x / hsub;
3508 int y = src_y / vsub;
3509 u32 offset;
3510
3511 intel_add_fb_offsets(&x, &y, plane_state, 1);
3512 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3513
3514 plane_state->color_plane[1].offset = offset;
3515 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3516 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3517
3518 return 0;
3519 }
3520
3521 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3522 {
3523 const struct drm_framebuffer *fb = plane_state->base.fb;
3524 int ret;
3525
3526 ret = intel_plane_compute_gtt(plane_state);
3527 if (ret)
3528 return ret;
3529
3530 if (!plane_state->base.visible)
3531 return 0;
3532
3533 /*
3534 * Handle the AUX surface first since
3535 * the main surface setup depends on it.
3536 */
3537 if (is_planar_yuv_format(fb->format->format)) {
3538 ret = skl_check_nv12_aux_surface(plane_state);
3539 if (ret)
3540 return ret;
3541 } else if (is_ccs_modifier(fb->modifier)) {
3542 ret = skl_check_ccs_aux_surface(plane_state);
3543 if (ret)
3544 return ret;
3545 } else {
3546 plane_state->color_plane[1].offset = ~0xfff;
3547 plane_state->color_plane[1].x = 0;
3548 plane_state->color_plane[1].y = 0;
3549 }
3550
3551 ret = skl_check_main_surface(plane_state);
3552 if (ret)
3553 return ret;
3554
3555 return 0;
3556 }
3557
3558 unsigned int
3559 i9xx_plane_max_stride(struct intel_plane *plane,
3560 u32 pixel_format, u64 modifier,
3561 unsigned int rotation)
3562 {
3563 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3564
3565 if (!HAS_GMCH(dev_priv)) {
3566 return 32*1024;
3567 } else if (INTEL_GEN(dev_priv) >= 4) {
3568 if (modifier == I915_FORMAT_MOD_X_TILED)
3569 return 16*1024;
3570 else
3571 return 32*1024;
3572 } else if (INTEL_GEN(dev_priv) >= 3) {
3573 if (modifier == I915_FORMAT_MOD_X_TILED)
3574 return 8*1024;
3575 else
3576 return 16*1024;
3577 } else {
3578 if (plane->i9xx_plane == PLANE_C)
3579 return 4*1024;
3580 else
3581 return 8*1024;
3582 }
3583 }
3584
3585 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3586 {
3587 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3588 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3589 u32 dspcntr = 0;
3590
3591 if (crtc_state->gamma_enable)
3592 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3593
3594 if (crtc_state->csc_enable)
3595 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3596
3597 if (INTEL_GEN(dev_priv) < 5)
3598 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3599
3600 return dspcntr;
3601 }
3602
3603 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3604 const struct intel_plane_state *plane_state)
3605 {
3606 struct drm_i915_private *dev_priv =
3607 to_i915(plane_state->base.plane->dev);
3608 const struct drm_framebuffer *fb = plane_state->base.fb;
3609 unsigned int rotation = plane_state->base.rotation;
3610 u32 dspcntr;
3611
3612 dspcntr = DISPLAY_PLANE_ENABLE;
3613
3614 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3615 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3616 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3617
3618 switch (fb->format->format) {
3619 case DRM_FORMAT_C8:
3620 dspcntr |= DISPPLANE_8BPP;
3621 break;
3622 case DRM_FORMAT_XRGB1555:
3623 dspcntr |= DISPPLANE_BGRX555;
3624 break;
3625 case DRM_FORMAT_RGB565:
3626 dspcntr |= DISPPLANE_BGRX565;
3627 break;
3628 case DRM_FORMAT_XRGB8888:
3629 dspcntr |= DISPPLANE_BGRX888;
3630 break;
3631 case DRM_FORMAT_XBGR8888:
3632 dspcntr |= DISPPLANE_RGBX888;
3633 break;
3634 case DRM_FORMAT_XRGB2101010:
3635 dspcntr |= DISPPLANE_BGRX101010;
3636 break;
3637 case DRM_FORMAT_XBGR2101010:
3638 dspcntr |= DISPPLANE_RGBX101010;
3639 break;
3640 default:
3641 MISSING_CASE(fb->format->format);
3642 return 0;
3643 }
3644
3645 if (INTEL_GEN(dev_priv) >= 4 &&
3646 fb->modifier == I915_FORMAT_MOD_X_TILED)
3647 dspcntr |= DISPPLANE_TILED;
3648
3649 if (rotation & DRM_MODE_ROTATE_180)
3650 dspcntr |= DISPPLANE_ROTATE_180;
3651
3652 if (rotation & DRM_MODE_REFLECT_X)
3653 dspcntr |= DISPPLANE_MIRROR;
3654
3655 return dspcntr;
3656 }
3657
3658 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3659 {
3660 struct drm_i915_private *dev_priv =
3661 to_i915(plane_state->base.plane->dev);
3662 int src_x, src_y;
3663 u32 offset;
3664 int ret;
3665
3666 ret = intel_plane_compute_gtt(plane_state);
3667 if (ret)
3668 return ret;
3669
3670 if (!plane_state->base.visible)
3671 return 0;
3672
3673 src_x = plane_state->base.src.x1 >> 16;
3674 src_y = plane_state->base.src.y1 >> 16;
3675
3676 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3677
3678 if (INTEL_GEN(dev_priv) >= 4)
3679 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3680 plane_state, 0);
3681 else
3682 offset = 0;
3683
3684 /*
3685 * Put the final coordinates back so that the src
3686 * coordinate checks will see the right values.
3687 */
3688 drm_rect_translate(&plane_state->base.src,
3689 (src_x << 16) - plane_state->base.src.x1,
3690 (src_y << 16) - plane_state->base.src.y1);
3691
3692 /* HSW/BDW do this automagically in hardware */
3693 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3694 unsigned int rotation = plane_state->base.rotation;
3695 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3696 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3697
3698 if (rotation & DRM_MODE_ROTATE_180) {
3699 src_x += src_w - 1;
3700 src_y += src_h - 1;
3701 } else if (rotation & DRM_MODE_REFLECT_X) {
3702 src_x += src_w - 1;
3703 }
3704 }
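/*
 * Note: the adjustment above leaves src_x/src_y pointing at the last
 * pixel of the source window for ROTATE_180 (and at the mirrored edge
 * for REFLECT_X), presumably because the hardware scans the fb
 * backwards from the programmed offset in those modes; HSW/BDW apply
 * the same adjustment in hardware.
 */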
3705
3706 plane_state->color_plane[0].offset = offset;
3707 plane_state->color_plane[0].x = src_x;
3708 plane_state->color_plane[0].y = src_y;
3709
3710 return 0;
3711 }
3712
3713 static int
3714 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3715 struct intel_plane_state *plane_state)
3716 {
3717 int ret;
3718
3719 ret = chv_plane_check_rotation(plane_state);
3720 if (ret)
3721 return ret;
3722
3723 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3724 &crtc_state->base,
3725 DRM_PLANE_HELPER_NO_SCALING,
3726 DRM_PLANE_HELPER_NO_SCALING,
3727 false, true);
3728 if (ret)
3729 return ret;
3730
3731 ret = i9xx_check_plane_surface(plane_state);
3732 if (ret)
3733 return ret;
3734
3735 if (!plane_state->base.visible)
3736 return 0;
3737
3738 ret = intel_plane_check_src_coordinates(plane_state);
3739 if (ret)
3740 return ret;
3741
3742 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3743
3744 return 0;
3745 }
3746
3747 static void i9xx_update_plane(struct intel_plane *plane,
3748 const struct intel_crtc_state *crtc_state,
3749 const struct intel_plane_state *plane_state)
3750 {
3751 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3752 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3753 u32 linear_offset;
3754 int x = plane_state->color_plane[0].x;
3755 int y = plane_state->color_plane[0].y;
3756 unsigned long irqflags;
3757 u32 dspaddr_offset;
3758 u32 dspcntr;
3759
3760 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3761
3762 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3763
3764 if (INTEL_GEN(dev_priv) >= 4)
3765 dspaddr_offset = plane_state->color_plane[0].offset;
3766 else
3767 dspaddr_offset = linear_offset;
3768
3769 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3770
3771 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3772
3773 if (INTEL_GEN(dev_priv) < 4) {
3774 /* pipesrc and dspsize control the size that is scaled from,
3775 * which should always be the user's requested size.
3776 */
3777 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3778 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3779 ((crtc_state->pipe_src_h - 1) << 16) |
3780 (crtc_state->pipe_src_w - 1));
3781 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3782 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3783 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3784 ((crtc_state->pipe_src_h - 1) << 16) |
3785 (crtc_state->pipe_src_w - 1));
3786 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3787 }
3788
3789 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3790 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3791 } else if (INTEL_GEN(dev_priv) >= 4) {
3792 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3793 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3794 }
3795
3796 /*
3797 * The control register self-arms if the plane was previously
3798 * disabled. Try to make the plane enable atomic by writing
3799 * the control register just before the surface register.
3800 */
3801 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3802 if (INTEL_GEN(dev_priv) >= 4)
3803 I915_WRITE_FW(DSPSURF(i9xx_plane),
3804 intel_plane_ggtt_offset(plane_state) +
3805 dspaddr_offset);
3806 else
3807 I915_WRITE_FW(DSPADDR(i9xx_plane),
3808 intel_plane_ggtt_offset(plane_state) +
3809 dspaddr_offset);
3810
3811 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3812 }
3813
3814 static void i9xx_disable_plane(struct intel_plane *plane,
3815 const struct intel_crtc_state *crtc_state)
3816 {
3817 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3818 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3819 unsigned long irqflags;
3820 u32 dspcntr;
3821
3822 /*
3823 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3824 * enable on ilk+ affect the pipe bottom color as
3825 * well, so we must configure them even if the plane
3826 * is disabled.
3827 *
3828 * On pre-g4x there is no way to gamma correct the
3829 * pipe bottom color but we'll keep on doing this
3830 * anyway so that the crtc state readout works correctly.
3831 */
3832 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3833
3834 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3835
3836 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3837 if (INTEL_GEN(dev_priv) >= 4)
3838 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3839 else
3840 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3841
3842 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3843 }
3844
3845 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3846 enum pipe *pipe)
3847 {
3848 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3849 enum intel_display_power_domain power_domain;
3850 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3851 intel_wakeref_t wakeref;
3852 bool ret;
3853 u32 val;
3854
3855 /*
3856 * Not 100% correct for planes that can move between pipes,
3857 * but that's only the case for gen2-4 which don't have any
3858 * display power wells.
3859 */
3860 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3861 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3862 if (!wakeref)
3863 return false;
3864
3865 val = I915_READ(DSPCNTR(i9xx_plane));
3866
3867 ret = val & DISPLAY_PLANE_ENABLE;
3868
3869 if (INTEL_GEN(dev_priv) >= 5)
3870 *pipe = plane->pipe;
3871 else
3872 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3873 DISPPLANE_SEL_PIPE_SHIFT;
3874
3875 intel_display_power_put(dev_priv, power_domain, wakeref);
3876
3877 return ret;
3878 }
3879
3880 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3881 {
3882 struct drm_device *dev = intel_crtc->base.dev;
3883 struct drm_i915_private *dev_priv = to_i915(dev);
3884
3885 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3886 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3887 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3888 }
3889
3890 /*
3891 * This function detaches (a.k.a. unbinds) unused scalers in hardware.
3892 */
3893 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3894 {
3895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3896 const struct intel_crtc_scaler_state *scaler_state =
3897 &crtc_state->scaler_state;
3898 int i;
3899
3900 /* loop through and disable scalers that aren't in use */
3901 for (i = 0; i < intel_crtc->num_scalers; i++) {
3902 if (!scaler_state->scalers[i].in_use)
3903 skl_detach_scaler(intel_crtc, i);
3904 }
3905 }
3906
3907 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3908 int color_plane, unsigned int rotation)
3909 {
3910 /*
3911 * The stride is expressed as a number of 64 byte chunks for
3912 * linear buffers, or as a number of tiles for tiled buffers.
3913 */
3914 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3915 return 64;
3916 else if (drm_rotation_90_or_270(rotation))
3917 return intel_tile_height(fb, color_plane);
3918 else
3919 return intel_tile_width_bytes(fb, color_plane);
3920 }
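/*
 * Worked example, assuming the usual gen9 tile geometry (X tile
 * 512 bytes wide, Y/Yf tile 128 bytes wide): a linear fb with a
 * 3840 byte stride is programmed as 3840 / 64 = 60 chunks, while
 * an X-tiled fb with a 4096 byte stride is programmed as
 * 4096 / 512 = 8 tiles.
 */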
3921
3922 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3923 int color_plane)
3924 {
3925 const struct drm_framebuffer *fb = plane_state->base.fb;
3926 unsigned int rotation = plane_state->base.rotation;
3927 u32 stride = plane_state->color_plane[color_plane].stride;
3928
3929 if (color_plane >= fb->format->num_planes)
3930 return 0;
3931
3932 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3933 }
3934
3935 static u32 skl_plane_ctl_format(u32 pixel_format)
3936 {
3937 switch (pixel_format) {
3938 case DRM_FORMAT_C8:
3939 return PLANE_CTL_FORMAT_INDEXED;
3940 case DRM_FORMAT_RGB565:
3941 return PLANE_CTL_FORMAT_RGB_565;
3942 case DRM_FORMAT_XBGR8888:
3943 case DRM_FORMAT_ABGR8888:
3944 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3945 case DRM_FORMAT_XRGB8888:
3946 case DRM_FORMAT_ARGB8888:
3947 return PLANE_CTL_FORMAT_XRGB_8888;
3948 case DRM_FORMAT_XRGB2101010:
3949 return PLANE_CTL_FORMAT_XRGB_2101010;
3950 case DRM_FORMAT_XBGR2101010:
3951 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3952 case DRM_FORMAT_XBGR16161616F:
3953 case DRM_FORMAT_ABGR16161616F:
3954 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3955 case DRM_FORMAT_XRGB16161616F:
3956 case DRM_FORMAT_ARGB16161616F:
3957 return PLANE_CTL_FORMAT_XRGB_16161616F;
3958 case DRM_FORMAT_YUYV:
3959 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3960 case DRM_FORMAT_YVYU:
3961 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3962 case DRM_FORMAT_UYVY:
3963 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3964 case DRM_FORMAT_VYUY:
3965 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3966 case DRM_FORMAT_NV12:
3967 return PLANE_CTL_FORMAT_NV12;
3968 case DRM_FORMAT_P010:
3969 return PLANE_CTL_FORMAT_P010;
3970 case DRM_FORMAT_P012:
3971 return PLANE_CTL_FORMAT_P012;
3972 case DRM_FORMAT_P016:
3973 return PLANE_CTL_FORMAT_P016;
3974 case DRM_FORMAT_Y210:
3975 return PLANE_CTL_FORMAT_Y210;
3976 case DRM_FORMAT_Y212:
3977 return PLANE_CTL_FORMAT_Y212;
3978 case DRM_FORMAT_Y216:
3979 return PLANE_CTL_FORMAT_Y216;
3980 case DRM_FORMAT_XVYU2101010:
3981 return PLANE_CTL_FORMAT_Y410;
3982 case DRM_FORMAT_XVYU12_16161616:
3983 return PLANE_CTL_FORMAT_Y412;
3984 case DRM_FORMAT_XVYU16161616:
3985 return PLANE_CTL_FORMAT_Y416;
3986 default:
3987 MISSING_CASE(pixel_format);
3988 }
3989
3990 return 0;
3991 }
3992
3993 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3994 {
3995 if (!plane_state->base.fb->format->has_alpha)
3996 return PLANE_CTL_ALPHA_DISABLE;
3997
3998 switch (plane_state->base.pixel_blend_mode) {
3999 case DRM_MODE_BLEND_PIXEL_NONE:
4000 return PLANE_CTL_ALPHA_DISABLE;
4001 case DRM_MODE_BLEND_PREMULTI:
4002 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4003 case DRM_MODE_BLEND_COVERAGE:
4004 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4005 default:
4006 MISSING_CASE(plane_state->base.pixel_blend_mode);
4007 return PLANE_CTL_ALPHA_DISABLE;
4008 }
4009 }
4010
4011 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4012 {
4013 if (!plane_state->base.fb->format->has_alpha)
4014 return PLANE_COLOR_ALPHA_DISABLE;
4015
4016 switch (plane_state->base.pixel_blend_mode) {
4017 case DRM_MODE_BLEND_PIXEL_NONE:
4018 return PLANE_COLOR_ALPHA_DISABLE;
4019 case DRM_MODE_BLEND_PREMULTI:
4020 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4021 case DRM_MODE_BLEND_COVERAGE:
4022 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4023 default:
4024 MISSING_CASE(plane_state->base.pixel_blend_mode);
4025 return PLANE_COLOR_ALPHA_DISABLE;
4026 }
4027 }
4028
4029 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4030 {
4031 switch (fb_modifier) {
4032 case DRM_FORMAT_MOD_LINEAR:
4033 break;
4034 case I915_FORMAT_MOD_X_TILED:
4035 return PLANE_CTL_TILED_X;
4036 case I915_FORMAT_MOD_Y_TILED:
4037 return PLANE_CTL_TILED_Y;
4038 case I915_FORMAT_MOD_Y_TILED_CCS:
4039 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4040 case I915_FORMAT_MOD_Yf_TILED:
4041 return PLANE_CTL_TILED_YF;
4042 case I915_FORMAT_MOD_Yf_TILED_CCS:
4043 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4044 default:
4045 MISSING_CASE(fb_modifier);
4046 }
4047
4048 return 0;
4049 }
4050
4051 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4052 {
4053 switch (rotate) {
4054 case DRM_MODE_ROTATE_0:
4055 break;
4056 /*
4057 * DRM_MODE_ROTATE_ is counterclockwise, to stay compatible with Xrandr,
4058 * while i915 HW rotation is clockwise; that's why we swap them here.
4059 */
4060 case DRM_MODE_ROTATE_90:
4061 return PLANE_CTL_ROTATE_270;
4062 case DRM_MODE_ROTATE_180:
4063 return PLANE_CTL_ROTATE_180;
4064 case DRM_MODE_ROTATE_270:
4065 return PLANE_CTL_ROTATE_90;
4066 default:
4067 MISSING_CASE(rotate);
4068 }
4069
4070 return 0;
4071 }
4072
4073 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4074 {
4075 switch (reflect) {
4076 case 0:
4077 break;
4078 case DRM_MODE_REFLECT_X:
4079 return PLANE_CTL_FLIP_HORIZONTAL;
4080 case DRM_MODE_REFLECT_Y:
4081 default:
4082 MISSING_CASE(reflect);
4083 }
4084
4085 return 0;
4086 }
4087
4088 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4089 {
4090 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4091 u32 plane_ctl = 0;
4092
4093 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4094 return plane_ctl;
4095
4096 if (crtc_state->gamma_enable)
4097 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4098
4099 if (crtc_state->csc_enable)
4100 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4101
4102 return plane_ctl;
4103 }
4104
4105 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4106 const struct intel_plane_state *plane_state)
4107 {
4108 struct drm_i915_private *dev_priv =
4109 to_i915(plane_state->base.plane->dev);
4110 const struct drm_framebuffer *fb = plane_state->base.fb;
4111 unsigned int rotation = plane_state->base.rotation;
4112 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4113 u32 plane_ctl;
4114
4115 plane_ctl = PLANE_CTL_ENABLE;
4116
4117 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4118 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4119 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4120
4121 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4122 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4123
4124 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4125 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4126 }
4127
4128 plane_ctl |= skl_plane_ctl_format(fb->format->format);
4129 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4130 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4131
4132 if (INTEL_GEN(dev_priv) >= 10)
4133 plane_ctl |= cnl_plane_ctl_flip(rotation &
4134 DRM_MODE_REFLECT_MASK);
4135
4136 if (key->flags & I915_SET_COLORKEY_DESTINATION)
4137 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4138 else if (key->flags & I915_SET_COLORKEY_SOURCE)
4139 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4140
4141 return plane_ctl;
4142 }
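/*
 * Example composition from the helpers above: an opaque XRGB8888,
 * X-tiled, unrotated, non-colorkeyed fb on pre-GLK hardware yields
 * PLANE_CTL_ENABLE | PLANE_CTL_ALPHA_DISABLE |
 * PLANE_CTL_PLANE_GAMMA_DISABLE | PLANE_CTL_FORMAT_XRGB_8888 |
 * PLANE_CTL_TILED_X.
 */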
4143
4144 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4145 {
4146 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4147 u32 plane_color_ctl = 0;
4148
4149 if (INTEL_GEN(dev_priv) >= 11)
4150 return plane_color_ctl;
4151
4152 if (crtc_state->gamma_enable)
4153 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4154
4155 if (crtc_state->csc_enable)
4156 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4157
4158 return plane_color_ctl;
4159 }
4160
4161 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4162 const struct intel_plane_state *plane_state)
4163 {
4164 struct drm_i915_private *dev_priv =
4165 to_i915(plane_state->base.plane->dev);
4166 const struct drm_framebuffer *fb = plane_state->base.fb;
4167 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4168 u32 plane_color_ctl = 0;
4169
4170 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4171 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4172
4173 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4174 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4175 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4176 else
4177 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4178
4179 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4180 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4181 } else if (fb->format->is_yuv) {
4182 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4183 }
4184
4185 return plane_color_ctl;
4186 }
4187
4188 static int
4189 __intel_display_resume(struct drm_device *dev,
4190 struct drm_atomic_state *state,
4191 struct drm_modeset_acquire_ctx *ctx)
4192 {
4193 struct drm_crtc_state *crtc_state;
4194 struct drm_crtc *crtc;
4195 int i, ret;
4196
4197 intel_modeset_setup_hw_state(dev, ctx);
4198 i915_redisable_vga(to_i915(dev));
4199
4200 if (!state)
4201 return 0;
4202
4203 /*
4204 * We've duplicated the state; pointers to the old state are invalid.
4205 *
4206 * Don't attempt to use the old state until we commit the duplicated state.
4207 */
4208 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4209 /*
4210 * Force recalculation even if we restore
4211 * current state. With fast modeset this may not result
4212 * in a modeset when the state is compatible.
4213 */
4214 crtc_state->mode_changed = true;
4215 }
4216
4217 /* ignore any reset values/BIOS leftovers in the WM registers */
4218 if (!HAS_GMCH(to_i915(dev)))
4219 to_intel_atomic_state(state)->skip_intermediate_wm = true;
4220
4221 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4222
4223 WARN_ON(ret == -EDEADLK);
4224 return ret;
4225 }
4226
4227 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4228 {
4229 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4230 intel_has_gpu_reset(dev_priv));
4231 }
4232
4233 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4234 {
4235 struct drm_device *dev = &dev_priv->drm;
4236 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4237 struct drm_atomic_state *state;
4238 int ret;
4239
4240 /* reset doesn't touch the display */
4241 if (!i915_modparams.force_reset_modeset_test &&
4242 !gpu_reset_clobbers_display(dev_priv))
4243 return;
4244
4245 /* We have a modeset vs reset deadlock, defensively unbreak it. */
4246 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4247 wake_up_all(&dev_priv->gpu_error.wait_queue);
4248
4249 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4250 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4251 i915_gem_set_wedged(dev_priv);
4252 }
4253
4254 /*
4255 * Need mode_config.mutex so that we don't
4256 * trample ongoing ->detect() and whatnot.
4257 */
4258 mutex_lock(&dev->mode_config.mutex);
4259 drm_modeset_acquire_init(ctx, 0);
4260 while (1) {
4261 ret = drm_modeset_lock_all_ctx(dev, ctx);
4262 if (ret != -EDEADLK)
4263 break;
4264
4265 drm_modeset_backoff(ctx);
4266 }
4267 /*
4268 * Disabling the crtcs gracefully seems nicer. Also the
4269 * g33 docs say we should at least disable all the planes.
4270 */
4271 state = drm_atomic_helper_duplicate_state(dev, ctx);
4272 if (IS_ERR(state)) {
4273 ret = PTR_ERR(state);
4274 DRM_ERROR("Duplicating state failed with %i\n", ret);
4275 return;
4276 }
4277
4278 ret = drm_atomic_helper_disable_all(dev, ctx);
4279 if (ret) {
4280 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
4281 drm_atomic_state_put(state);
4282 return;
4283 }
4284
4285 dev_priv->modeset_restore_state = state;
4286 state->acquire_ctx = ctx;
4287 }
4288
4289 void intel_finish_reset(struct drm_i915_private *dev_priv)
4290 {
4291 struct drm_device *dev = &dev_priv->drm;
4292 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4293 struct drm_atomic_state *state;
4294 int ret;
4295
4296 /* reset doesn't touch the display */
4297 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
4298 return;
4299
4300 state = fetch_and_zero(&dev_priv->modeset_restore_state);
4301 if (!state)
4302 goto unlock;
4303
4304 /* reset doesn't touch the display */
4305 if (!gpu_reset_clobbers_display(dev_priv)) {
4306 /* for testing only restore the display */
4307 ret = __intel_display_resume(dev, state, ctx);
4308 if (ret)
4309 DRM_ERROR("Restoring old state failed with %i\n", ret);
4310 } else {
4311 /*
4312 * The display has been reset as well,
4313 * so need a full re-initialization.
4314 */
4315 intel_pps_unlock_regs_wa(dev_priv);
4316 intel_modeset_init_hw(dev);
4317 intel_init_clock_gating(dev_priv);
4318
4319 spin_lock_irq(&dev_priv->irq_lock);
4320 if (dev_priv->display.hpd_irq_setup)
4321 dev_priv->display.hpd_irq_setup(dev_priv);
4322 spin_unlock_irq(&dev_priv->irq_lock);
4323
4324 ret = __intel_display_resume(dev, state, ctx);
4325 if (ret)
4326 DRM_ERROR("Restoring old state failed with %i\n", ret);
4327
4328 intel_hpd_init(dev_priv);
4329 }
4330
4331 drm_atomic_state_put(state);
4332 unlock:
4333 drm_modeset_drop_locks(ctx);
4334 drm_modeset_acquire_fini(ctx);
4335 mutex_unlock(&dev->mode_config.mutex);
4336
4337 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4338 }
4339
4340 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4341 {
4342 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4343 enum pipe pipe = crtc->pipe;
4344 u32 tmp;
4345
4346 tmp = I915_READ(PIPE_CHICKEN(pipe));
4347
4348 /*
4349 * Display WA #1153: icl
4350 * enable hardware to bypass the alpha math
4351 * and rounding for per-pixel values 0x00 and 0xff
4352 */
4353 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4354 /*
4355 * Display WA #1605353570: icl
4356 * Set the pixel rounding bit to 1 to allow
4357 * passthrough of framebuffer pixels unmodified
4358 * across the pipe
4359 */
4360 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4361 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4362 }
4363
4364 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4365 const struct intel_crtc_state *new_crtc_state)
4366 {
4367 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
4368 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4369
4370 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
4371 crtc->base.mode = new_crtc_state->base.mode;
4372
4373 /*
4374 * Update pipe size and adjust fitter if needed: the reason for this is
4375 * that in compute_mode_changes we check the native mode (not the pfit
4376 * mode) to see if we can flip rather than do a full mode set. In the
4377 * fastboot case, we'll flip, but if we don't update the pipesrc and
4378 * pfit state, we'll end up with a big fb scanned out into the wrong
4379 * sized surface.
4380 */
4381
4382 I915_WRITE(PIPESRC(crtc->pipe),
4383 ((new_crtc_state->pipe_src_w - 1) << 16) |
4384 (new_crtc_state->pipe_src_h - 1));
4385
4386 /* on skylake this is done by detaching scalers */
4387 if (INTEL_GEN(dev_priv) >= 9) {
4388 skl_detach_scalers(new_crtc_state);
4389
4390 if (new_crtc_state->pch_pfit.enabled)
4391 skylake_pfit_enable(new_crtc_state);
4392 } else if (HAS_PCH_SPLIT(dev_priv)) {
4393 if (new_crtc_state->pch_pfit.enabled)
4394 ironlake_pfit_enable(new_crtc_state);
4395 else if (old_crtc_state->pch_pfit.enabled)
4396 ironlake_pfit_disable(old_crtc_state);
4397 }
4398
4399 if (INTEL_GEN(dev_priv) >= 11)
4400 icl_set_pipe_chicken(crtc);
4401 }
4402
4403 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4404 {
4405 struct drm_device *dev = crtc->base.dev;
4406 struct drm_i915_private *dev_priv = to_i915(dev);
4407 int pipe = crtc->pipe;
4408 i915_reg_t reg;
4409 u32 temp;
4410
4411 /* enable normal train */
4412 reg = FDI_TX_CTL(pipe);
4413 temp = I915_READ(reg);
4414 if (IS_IVYBRIDGE(dev_priv)) {
4415 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4416 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4417 } else {
4418 temp &= ~FDI_LINK_TRAIN_NONE;
4419 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4420 }
4421 I915_WRITE(reg, temp);
4422
4423 reg = FDI_RX_CTL(pipe);
4424 temp = I915_READ(reg);
4425 if (HAS_PCH_CPT(dev_priv)) {
4426 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4427 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4428 } else {
4429 temp &= ~FDI_LINK_TRAIN_NONE;
4430 temp |= FDI_LINK_TRAIN_NONE;
4431 }
4432 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4433
4434 /* wait one idle pattern time */
4435 POSTING_READ(reg);
4436 udelay(1000);
4437
4438 /* IVB wants error correction enabled */
4439 if (IS_IVYBRIDGE(dev_priv))
4440 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4441 FDI_FE_ERRC_ENABLE);
4442 }
4443
4444 /* The FDI link training functions for ILK/Ibexpeak. */
4445 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4446 const struct intel_crtc_state *crtc_state)
4447 {
4448 struct drm_device *dev = crtc->base.dev;
4449 struct drm_i915_private *dev_priv = to_i915(dev);
4450 int pipe = crtc->pipe;
4451 i915_reg_t reg;
4452 u32 temp, tries;
4453
4454 /* FDI needs bits from pipe first */
4455 assert_pipe_enabled(dev_priv, pipe);
4456
4457 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4458 for the train result */
4459 reg = FDI_RX_IMR(pipe);
4460 temp = I915_READ(reg);
4461 temp &= ~FDI_RX_SYMBOL_LOCK;
4462 temp &= ~FDI_RX_BIT_LOCK;
4463 I915_WRITE(reg, temp);
4464 I915_READ(reg);
4465 udelay(150);
4466
4467 /* enable CPU FDI TX and PCH FDI RX */
4468 reg = FDI_TX_CTL(pipe);
4469 temp = I915_READ(reg);
4470 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4471 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4472 temp &= ~FDI_LINK_TRAIN_NONE;
4473 temp |= FDI_LINK_TRAIN_PATTERN_1;
4474 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4475
4476 reg = FDI_RX_CTL(pipe);
4477 temp = I915_READ(reg);
4478 temp &= ~FDI_LINK_TRAIN_NONE;
4479 temp |= FDI_LINK_TRAIN_PATTERN_1;
4480 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4481
4482 POSTING_READ(reg);
4483 udelay(150);
4484
4485 /* Ironlake workaround, enable clock pointer after FDI enable */
4486 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4487 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4488 FDI_RX_PHASE_SYNC_POINTER_EN);
4489
4490 reg = FDI_RX_IIR(pipe);
4491 for (tries = 0; tries < 5; tries++) {
4492 temp = I915_READ(reg);
4493 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4494
4495 if ((temp & FDI_RX_BIT_LOCK)) {
4496 DRM_DEBUG_KMS("FDI train 1 done.\n");
4497 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4498 break;
4499 }
4500 }
4501 if (tries == 5)
4502 DRM_ERROR("FDI train 1 fail!\n");
4503
4504 /* Train 2 */
4505 reg = FDI_TX_CTL(pipe);
4506 temp = I915_READ(reg);
4507 temp &= ~FDI_LINK_TRAIN_NONE;
4508 temp |= FDI_LINK_TRAIN_PATTERN_2;
4509 I915_WRITE(reg, temp);
4510
4511 reg = FDI_RX_CTL(pipe);
4512 temp = I915_READ(reg);
4513 temp &= ~FDI_LINK_TRAIN_NONE;
4514 temp |= FDI_LINK_TRAIN_PATTERN_2;
4515 I915_WRITE(reg, temp);
4516
4517 POSTING_READ(reg);
4518 udelay(150);
4519
4520 reg = FDI_RX_IIR(pipe);
4521 for (tries = 0; tries < 5; tries++) {
4522 temp = I915_READ(reg);
4523 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4524
4525 if (temp & FDI_RX_SYMBOL_LOCK) {
4526 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4527 DRM_DEBUG_KMS("FDI train 2 done.\n");
4528 break;
4529 }
4530 }
4531 if (tries == 5)
4532 DRM_ERROR("FDI train 2 fail!\n");
4533
4534 DRM_DEBUG_KMS("FDI train done\n");
4535
4536 }
4537
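/*
 * FDI TX voltage swing / preemphasis combinations, tried in order
 * during link training; the macro names encode the settings
 * (e.g. 400mV swing with 0dB preemphasis).
 */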
4538 static const int snb_b_fdi_train_param[] = {
4539 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4540 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4541 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4542 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4543 };
4544
4545 /* The FDI link training functions for SNB/Cougarpoint. */
4546 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4547 const struct intel_crtc_state *crtc_state)
4548 {
4549 struct drm_device *dev = crtc->base.dev;
4550 struct drm_i915_private *dev_priv = to_i915(dev);
4551 int pipe = crtc->pipe;
4552 i915_reg_t reg;
4553 u32 temp, i, retry;
4554
4555 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4556 for the train result */
4557 reg = FDI_RX_IMR(pipe);
4558 temp = I915_READ(reg);
4559 temp &= ~FDI_RX_SYMBOL_LOCK;
4560 temp &= ~FDI_RX_BIT_LOCK;
4561 I915_WRITE(reg, temp);
4562
4563 POSTING_READ(reg);
4564 udelay(150);
4565
4566 /* enable CPU FDI TX and PCH FDI RX */
4567 reg = FDI_TX_CTL(pipe);
4568 temp = I915_READ(reg);
4569 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4570 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4571 temp &= ~FDI_LINK_TRAIN_NONE;
4572 temp |= FDI_LINK_TRAIN_PATTERN_1;
4573 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4574 /* SNB-B */
4575 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4576 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4577
4578 I915_WRITE(FDI_RX_MISC(pipe),
4579 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4580
4581 reg = FDI_RX_CTL(pipe);
4582 temp = I915_READ(reg);
4583 if (HAS_PCH_CPT(dev_priv)) {
4584 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4585 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4586 } else {
4587 temp &= ~FDI_LINK_TRAIN_NONE;
4588 temp |= FDI_LINK_TRAIN_PATTERN_1;
4589 }
4590 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4591
4592 POSTING_READ(reg);
4593 udelay(150);
4594
4595 for (i = 0; i < 4; i++) {
4596 reg = FDI_TX_CTL(pipe);
4597 temp = I915_READ(reg);
4598 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4599 temp |= snb_b_fdi_train_param[i];
4600 I915_WRITE(reg, temp);
4601
4602 POSTING_READ(reg);
4603 udelay(500);
4604
4605 for (retry = 0; retry < 5; retry++) {
4606 reg = FDI_RX_IIR(pipe);
4607 temp = I915_READ(reg);
4608 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4609 if (temp & FDI_RX_BIT_LOCK) {
4610 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4611 DRM_DEBUG_KMS("FDI train 1 done.\n");
4612 break;
4613 }
4614 udelay(50);
4615 }
4616 if (retry < 5)
4617 break;
4618 }
4619 if (i == 4)
4620 DRM_ERROR("FDI train 1 fail!\n");
4621
4622 /* Train 2 */
4623 reg = FDI_TX_CTL(pipe);
4624 temp = I915_READ(reg);
4625 temp &= ~FDI_LINK_TRAIN_NONE;
4626 temp |= FDI_LINK_TRAIN_PATTERN_2;
4627 if (IS_GEN(dev_priv, 6)) {
4628 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4629 /* SNB-B */
4630 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4631 }
4632 I915_WRITE(reg, temp);
4633
4634 reg = FDI_RX_CTL(pipe);
4635 temp = I915_READ(reg);
4636 if (HAS_PCH_CPT(dev_priv)) {
4637 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4638 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4639 } else {
4640 temp &= ~FDI_LINK_TRAIN_NONE;
4641 temp |= FDI_LINK_TRAIN_PATTERN_2;
4642 }
4643 I915_WRITE(reg, temp);
4644
4645 POSTING_READ(reg);
4646 udelay(150);
4647
4648 for (i = 0; i < 4; i++) {
4649 reg = FDI_TX_CTL(pipe);
4650 temp = I915_READ(reg);
4651 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4652 temp |= snb_b_fdi_train_param[i];
4653 I915_WRITE(reg, temp);
4654
4655 POSTING_READ(reg);
4656 udelay(500);
4657
4658 for (retry = 0; retry < 5; retry++) {
4659 reg = FDI_RX_IIR(pipe);
4660 temp = I915_READ(reg);
4661 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4662 if (temp & FDI_RX_SYMBOL_LOCK) {
4663 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4664 DRM_DEBUG_KMS("FDI train 2 done.\n");
4665 break;
4666 }
4667 udelay(50);
4668 }
4669 if (retry < 5)
4670 break;
4671 }
4672 if (i == 4)
4673 DRM_ERROR("FDI train 2 fail!\n");
4674
4675 DRM_DEBUG_KMS("FDI train done.\n");
4676 }
4677
4678 /* Manual link training for Ivy Bridge A0 parts */
4679 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4680 const struct intel_crtc_state *crtc_state)
4681 {
4682 struct drm_device *dev = crtc->base.dev;
4683 struct drm_i915_private *dev_priv = to_i915(dev);
4684 int pipe = crtc->pipe;
4685 i915_reg_t reg;
4686 u32 temp, i, j;
4687
4688 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4689 for the train result */
4690 reg = FDI_RX_IMR(pipe);
4691 temp = I915_READ(reg);
4692 temp &= ~FDI_RX_SYMBOL_LOCK;
4693 temp &= ~FDI_RX_BIT_LOCK;
4694 I915_WRITE(reg, temp);
4695
4696 POSTING_READ(reg);
4697 udelay(150);
4698
4699 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4700 I915_READ(FDI_RX_IIR(pipe)));
4701
4702 /* Try each vswing and preemphasis setting twice before moving on */
4703 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4704 /* disable first in case we need to retry */
4705 reg = FDI_TX_CTL(pipe);
4706 temp = I915_READ(reg);
4707 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4708 temp &= ~FDI_TX_ENABLE;
4709 I915_WRITE(reg, temp);
4710
4711 reg = FDI_RX_CTL(pipe);
4712 temp = I915_READ(reg);
4713 temp &= ~FDI_LINK_TRAIN_AUTO;
4714 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4715 temp &= ~FDI_RX_ENABLE;
4716 I915_WRITE(reg, temp);
4717
4718 /* enable CPU FDI TX and PCH FDI RX */
4719 reg = FDI_TX_CTL(pipe);
4720 temp = I915_READ(reg);
4721 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4722 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4723 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4724 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4725 temp |= snb_b_fdi_train_param[j/2];
4726 temp |= FDI_COMPOSITE_SYNC;
4727 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4728
4729 I915_WRITE(FDI_RX_MISC(pipe),
4730 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4731
4732 reg = FDI_RX_CTL(pipe);
4733 temp = I915_READ(reg);
4734 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4735 temp |= FDI_COMPOSITE_SYNC;
4736 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4737
4738 POSTING_READ(reg);
4739 udelay(1); /* should be 0.5us */
4740
4741 for (i = 0; i < 4; i++) {
4742 reg = FDI_RX_IIR(pipe);
4743 temp = I915_READ(reg);
4744 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4745
4746 if (temp & FDI_RX_BIT_LOCK ||
4747 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4748 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4749 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4750 i);
4751 break;
4752 }
4753 udelay(1); /* should be 0.5us */
4754 }
4755 if (i == 4) {
4756 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4757 continue;
4758 }
4759
4760 /* Train 2 */
4761 reg = FDI_TX_CTL(pipe);
4762 temp = I915_READ(reg);
4763 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4764 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4765 I915_WRITE(reg, temp);
4766
4767 reg = FDI_RX_CTL(pipe);
4768 temp = I915_READ(reg);
4769 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4770 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4771 I915_WRITE(reg, temp);
4772
4773 POSTING_READ(reg);
4774 udelay(2); /* should be 1.5us */
4775
4776 for (i = 0; i < 4; i++) {
4777 reg = FDI_RX_IIR(pipe);
4778 temp = I915_READ(reg);
4779 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4780
4781 if (temp & FDI_RX_SYMBOL_LOCK ||
4782 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4783 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4784 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4785 i);
4786 goto train_done;
4787 }
4788 udelay(2); /* should be 1.5us */
4789 }
4790 if (i == 4)
4791 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4792 }
4793
4794 train_done:
4795 DRM_DEBUG_KMS("FDI train done.\n");
4796 }
4797
4798 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4799 {
4800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4801 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4802 int pipe = intel_crtc->pipe;
4803 i915_reg_t reg;
4804 u32 temp;
4805
4806 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4807 reg = FDI_RX_CTL(pipe);
4808 temp = I915_READ(reg);
4809 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4810 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4811 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4812 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4813
4814 POSTING_READ(reg);
4815 udelay(200);
4816
4817 /* Switch from Rawclk to PCDclk */
4818 temp = I915_READ(reg);
4819 I915_WRITE(reg, temp | FDI_PCDCLK);
4820
4821 POSTING_READ(reg);
4822 udelay(200);
4823
4824 /* Enable CPU FDI TX PLL, always on for Ironlake */
4825 reg = FDI_TX_CTL(pipe);
4826 temp = I915_READ(reg);
4827 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4828 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4829
4830 POSTING_READ(reg);
4831 udelay(100);
4832 }
4833 }
4834
4835 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4836 {
4837 struct drm_device *dev = intel_crtc->base.dev;
4838 struct drm_i915_private *dev_priv = to_i915(dev);
4839 int pipe = intel_crtc->pipe;
4840 i915_reg_t reg;
4841 u32 temp;
4842
4843 /* Switch from PCDclk to Rawclk */
4844 reg = FDI_RX_CTL(pipe);
4845 temp = I915_READ(reg);
4846 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4847
4848 /* Disable CPU FDI TX PLL */
4849 reg = FDI_TX_CTL(pipe);
4850 temp = I915_READ(reg);
4851 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4852
4853 POSTING_READ(reg);
4854 udelay(100);
4855
4856 reg = FDI_RX_CTL(pipe);
4857 temp = I915_READ(reg);
4858 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4859
4860 /* Wait for the clocks to turn off. */
4861 POSTING_READ(reg);
4862 udelay(100);
4863 }
4864
4865 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4866 {
4867 struct drm_device *dev = crtc->dev;
4868 struct drm_i915_private *dev_priv = to_i915(dev);
4869 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4870 int pipe = intel_crtc->pipe;
4871 i915_reg_t reg;
4872 u32 temp;
4873
4874 /* disable CPU FDI tx and PCH FDI rx */
4875 reg = FDI_TX_CTL(pipe);
4876 temp = I915_READ(reg);
4877 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4878 POSTING_READ(reg);
4879
4880 reg = FDI_RX_CTL(pipe);
4881 temp = I915_READ(reg);
4882 temp &= ~(0x7 << 16);
4883 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4884 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4885
4886 POSTING_READ(reg);
4887 udelay(100);
4888
4889 /* Ironlake workaround, disable clock pointer after downing FDI */
4890 if (HAS_PCH_IBX(dev_priv))
4891 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4892
4893 /* still set train pattern 1 */
4894 reg = FDI_TX_CTL(pipe);
4895 temp = I915_READ(reg);
4896 temp &= ~FDI_LINK_TRAIN_NONE;
4897 temp |= FDI_LINK_TRAIN_PATTERN_1;
4898 I915_WRITE(reg, temp);
4899
4900 reg = FDI_RX_CTL(pipe);
4901 temp = I915_READ(reg);
4902 if (HAS_PCH_CPT(dev_priv)) {
4903 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4904 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4905 } else {
4906 temp &= ~FDI_LINK_TRAIN_NONE;
4907 temp |= FDI_LINK_TRAIN_PATTERN_1;
4908 }
4909 /* BPC in FDI rx is consistent with that in PIPECONF */
4910 temp &= ~(0x07 << 16);
4911 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4912 I915_WRITE(reg, temp);
4913
4914 POSTING_READ(reg);
4915 udelay(100);
4916 }
4917
4918 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4919 {
4920 struct drm_crtc *crtc;
4921 bool cleanup_done;
4922
4923 drm_for_each_crtc(crtc, &dev_priv->drm) {
4924 struct drm_crtc_commit *commit;
4925 spin_lock(&crtc->commit_lock);
4926 commit = list_first_entry_or_null(&crtc->commit_list,
4927 struct drm_crtc_commit, commit_entry);
4928 cleanup_done = commit ?
4929 try_wait_for_completion(&commit->cleanup_done) : true;
4930 spin_unlock(&crtc->commit_lock);
4931
4932 if (cleanup_done)
4933 continue;
4934
4935 drm_crtc_wait_one_vblank(crtc);
4936
4937 return true;
4938 }
4939
4940 return false;
4941 }
4942
4943 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4944 {
4945 u32 temp;
4946
4947 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4948
4949 mutex_lock(&dev_priv->sb_lock);
4950
4951 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4952 temp |= SBI_SSCCTL_DISABLE;
4953 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4954
4955 mutex_unlock(&dev_priv->sb_lock);
4956 }
4957
4958 /* Program iCLKIP clock to the desired frequency */
4959 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4960 {
4961 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4962 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4963 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4964 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4965 u32 temp;
4966
4967 lpt_disable_iclkip(dev_priv);
4968
4969 /* The iCLK virtual clock root frequency is in MHz,
4970 * but the adjusted_mode->crtc_clock is in kHz. To get the
4971 * divisors, it is necessary to divide one by another, so we
4972 * convert the virtual clock precision to KHz here for higher
4973 * precision.
4974 */
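/*
 * Worked example with an illustrative (not spec-derived) clock:
 * for clock = 108000 kHz and auxdiv = 0, desired_divisor =
 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, giving
 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0;
 * divsel fits the 7-bit limit, so the loop settles on auxdiv = 0.
 */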
4975 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4976 u32 iclk_virtual_root_freq = 172800 * 1000;
4977 u32 iclk_pi_range = 64;
4978 u32 desired_divisor;
4979
4980 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4981 clock << auxdiv);
4982 divsel = (desired_divisor / iclk_pi_range) - 2;
4983 phaseinc = desired_divisor % iclk_pi_range;
4984
4985 /*
4986 * Near 20MHz is a corner case which is
4987 * out of range for the 7-bit divisor
4988 */
4989 if (divsel <= 0x7f)
4990 break;
4991 }
4992
4993 /* This should not happen with any sane values */
4994 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4995 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4996 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4997 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4998
4999 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5000 clock,
5001 auxdiv,
5002 divsel,
5003 phasedir,
5004 phaseinc);
5005
5006 mutex_lock(&dev_priv->sb_lock);
5007
5008 /* Program SSCDIVINTPHASE6 */
5009 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5010 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5011 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5012 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5013 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5014 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5015 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5016 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5017
5018 /* Program SSCAUXDIV */
5019 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5020 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5021 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5022 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5023
5024 /* Enable modulator and associated divider */
5025 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5026 temp &= ~SBI_SSCCTL_DISABLE;
5027 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5028
5029 mutex_unlock(&dev_priv->sb_lock);
5030
5031 /* Wait for initialization time */
5032 udelay(24);
5033
5034 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5035 }
5036
5037 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5038 {
5039 u32 divsel, phaseinc, auxdiv;
5040 u32 iclk_virtual_root_freq = 172800 * 1000;
5041 u32 iclk_pi_range = 64;
5042 u32 desired_divisor;
5043 u32 temp;
5044
5045 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5046 return 0;
5047
5048 mutex_lock(&dev_priv->sb_lock);
5049
5050 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5051 if (temp & SBI_SSCCTL_DISABLE) {
5052 mutex_unlock(&dev_priv->sb_lock);
5053 return 0;
5054 }
5055
5056 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5057 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5058 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5059 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5060 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5061
5062 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5063 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5064 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5065
5066 mutex_unlock(&dev_priv->sb_lock);
5067
5068 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5069
5070 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5071 desired_divisor << auxdiv);
5072 }
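/*
 * The calculation above is the inverse of the divisor derivation in
 * lpt_program_iclkip(): e.g. divsel = 23, phaseinc = 0, auxdiv = 0
 * gives desired_divisor = (23 + 2) * 64 + 0 = 1600, and
 * 172800000 / 1600 = 108000 kHz.
 */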
5073
5074 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5075 enum pipe pch_transcoder)
5076 {
5077 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5078 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5079 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5080
5081 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5082 I915_READ(HTOTAL(cpu_transcoder)));
5083 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5084 I915_READ(HBLANK(cpu_transcoder)));
5085 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5086 I915_READ(HSYNC(cpu_transcoder)));
5087
5088 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5089 I915_READ(VTOTAL(cpu_transcoder)));
5090 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5091 I915_READ(VBLANK(cpu_transcoder)));
5092 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5093 I915_READ(VSYNC(cpu_transcoder)));
5094 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5095 I915_READ(VSYNCSHIFT(cpu_transcoder)));
5096 }
5097
5098 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5099 {
5100 u32 temp;
5101
5102 temp = I915_READ(SOUTH_CHICKEN1);
5103 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5104 return;
5105
5106 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5107 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5108
5109 temp &= ~FDI_BC_BIFURCATION_SELECT;
5110 if (enable)
5111 temp |= FDI_BC_BIFURCATION_SELECT;
5112
5113 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5114 I915_WRITE(SOUTH_CHICKEN1, temp);
5115 POSTING_READ(SOUTH_CHICKEN1);
5116 }
5117
5118 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5119 {
5120 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5122
5123 switch (crtc->pipe) {
5124 case PIPE_A:
5125 break;
5126 case PIPE_B:
5127 if (crtc_state->fdi_lanes > 2)
5128 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5129 else
5130 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5131
5132 break;
5133 case PIPE_C:
5134 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5135
5136 break;
5137 default:
5138 BUG();
5139 }
5140 }
5141
5142 /*
5143 * Finds the encoder associated with the given CRTC. This can only be
5144 * used when we know that the CRTC isn't feeding multiple encoders!
5145 */
5146 static struct intel_encoder *
5147 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5148 const struct intel_crtc_state *crtc_state)
5149 {
5150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5151 const struct drm_connector_state *connector_state;
5152 const struct drm_connector *connector;
5153 struct intel_encoder *encoder = NULL;
5154 int num_encoders = 0;
5155 int i;
5156
5157 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5158 if (connector_state->crtc != &crtc->base)
5159 continue;
5160
5161 encoder = to_intel_encoder(connector_state->best_encoder);
5162 num_encoders++;
5163 }
5164
5165 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5166 num_encoders, pipe_name(crtc->pipe));
5167
5168 return encoder;
5169 }
5170
5171 /*
5172 * Enable PCH resources required for PCH ports:
5173 * - PCH PLLs
5174 * - FDI training & RX/TX
5175 * - update transcoder timings
5176 * - DP transcoding bits
5177 * - transcoder
5178 */
5179 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5180 const struct intel_crtc_state *crtc_state)
5181 {
5182 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5183 struct drm_device *dev = crtc->base.dev;
5184 struct drm_i915_private *dev_priv = to_i915(dev);
5185 int pipe = crtc->pipe;
5186 u32 temp;
5187
5188 assert_pch_transcoder_disabled(dev_priv, pipe);
5189
5190 if (IS_IVYBRIDGE(dev_priv))
5191 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5192
5193 /* Write the TU size bits before fdi link training, so that error
5194 * detection works. */
5195 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5196 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5197
5198 /* For PCH output, training FDI link */
5199 dev_priv->display.fdi_link_train(crtc, crtc_state);
5200
5201 /* We need to program the right clock selection before writing the pixel
5202 * multiplier into the DPLL. */
5203 if (HAS_PCH_CPT(dev_priv)) {
5204 u32 sel;
5205
5206 temp = I915_READ(PCH_DPLL_SEL);
5207 temp |= TRANS_DPLL_ENABLE(pipe);
5208 sel = TRANS_DPLLB_SEL(pipe);
5209 if (crtc_state->shared_dpll ==
5210 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5211 temp |= sel;
5212 else
5213 temp &= ~sel;
5214 I915_WRITE(PCH_DPLL_SEL, temp);
5215 }
5216
5217 /* XXX: PCH PLLs can be enabled any time before we enable the PCH
5218 * transcoder, and we actually should do this to not upset any PCH
5219 * transcoder that already uses the clock when we share it.
5220 *
5221 * Note that enable_shared_dpll tries to do the right thing, but
5222 * get_shared_dpll unconditionally resets the pll - we need that to have
5223 * the right LVDS enable sequence. */
5224 intel_enable_shared_dpll(crtc_state);
5225
5226 /* set transcoder timing, panel must allow it */
5227 assert_panel_unlocked(dev_priv, pipe);
5228 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5229
5230 intel_fdi_normal_train(crtc);
5231
5232 /* For PCH DP, enable TRANS_DP_CTL */
5233 if (HAS_PCH_CPT(dev_priv) &&
5234 intel_crtc_has_dp_encoder(crtc_state)) {
5235 const struct drm_display_mode *adjusted_mode =
5236 &crtc_state->base.adjusted_mode;
5237 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5238 i915_reg_t reg = TRANS_DP_CTL(pipe);
5239 enum port port;
5240
5241 temp = I915_READ(reg);
5242 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5243 TRANS_DP_SYNC_MASK |
5244 TRANS_DP_BPC_MASK);
5245 temp |= TRANS_DP_OUTPUT_ENABLE;
5246 temp |= bpc << 9; /* same format but at 11:9 */
5247
5248 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5249 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5250 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5251 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5252
5253 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5254 WARN_ON(port < PORT_B || port > PORT_D);
5255 temp |= TRANS_DP_PORT_SEL(port);
5256
5257 I915_WRITE(reg, temp);
5258 }
5259
5260 ironlake_enable_pch_transcoder(crtc_state);
5261 }
5262
5263 static void lpt_pch_enable(const struct intel_atomic_state *state,
5264 const struct intel_crtc_state *crtc_state)
5265 {
5266 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5267 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5268 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5269
5270 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5271
5272 lpt_program_iclkip(crtc_state);
5273
5274 /* Set transcoder timing. */
5275 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5276
5277 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5278 }
5279
5280 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
5281 {
5282 struct drm_i915_private *dev_priv = to_i915(dev);
5283 i915_reg_t dslreg = PIPEDSL(pipe);
5284 u32 temp;
5285
5286 temp = I915_READ(dslreg);
5287 udelay(500);
5288 if (wait_for(I915_READ(dslreg) != temp, 5)) {
5289 if (wait_for(I915_READ(dslreg) != temp, 5))
5290 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5291 }
5292 }
5293
5294 /*
5295 * The hardware phase 0.0 refers to the center of the pixel.
5296 * We want to start from the top/left edge, which is phase
5297 * -0.5. That matches how the hardware calculates the scaling
5298 * factors (from top-left of the first pixel to bottom-right
5299 * of the last pixel, as opposed to the pixel centers).
5300 *
5301 * For 4:2:0 subsampled chroma planes we obviously have to
5302 * adjust that so that the chroma sample position lands in
5303 * the right spot.
5304 *
5305 * Note that for packed YCbCr 4:2:2 formats there is no way to
5306 * control chroma siting. The hardware simply replicates the
5307 * chroma samples for both of the luma samples, and thus we don't
5308 * actually get the expected MPEG2 chroma siting convention :(
5309 * The same behaviour is observed on pre-SKL platforms as well.
5310 *
5311 * Theory behind the formula (note that we ignore sub-pixel
5312 * source coordinates):
5313 * s = source sample position
5314 * d = destination sample position
5315 *
5316 * Downscaling 4:1:
5317 * -0.5
5318 * | 0.0
5319 * | | 1.5 (initial phase)
5320 * | | |
5321 * v v v
5322 * | s | s | s | s |
5323 * | d |
5324 *
5325 * Upscaling 1:4:
5326 * -0.5
5327 * | -0.375 (initial phase)
5328 * | | 0.0
5329 * | | |
5330 * v v v
5331 * | s |
5332 * | d | d | d | d |
5333 */
5334 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5335 {
5336 int phase = -0x8000;
5337 u16 trip = 0;
5338
5339 if (chroma_cosited)
5340 phase += (sub - 1) * 0x8000 / sub;
5341
5342 phase += scale / (2 * sub);
5343
5344 /*
5345 * Hardware initial phase limited to [-0.5:1.5].
5346 * Since the max hardware scale factor is 3.0, we
5347 * should never actually exceed 1.0 here.
5348 */
5349 WARN_ON(phase < -0x8000 || phase > 0x18000);
5350
5351 if (phase < 0)
5352 phase = 0x10000 + phase;
5353 else
5354 trip = PS_PHASE_TRIP;
5355
5356 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5357 }
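/*
 * Plugging the diagrams above into the formula (scale is a 16.16
 * fixed point factor, so 0x10000 == 1.0): downscaling 4:1 on a
 * luma/RGB plane (sub = 1, scale = 0x40000) gives
 * phase = -0x8000 + 0x40000 / 2 = 0x18000, i.e. the +1.5 initial
 * phase shown above, while upscaling 1:4 (scale = 0x4000) gives
 * phase = -0x8000 + 0x4000 / 2 = -0x6000, i.e. -0.375.
 */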
5358
5359 #define SKL_MIN_SRC_W 8
5360 #define SKL_MAX_SRC_W 4096
5361 #define SKL_MIN_SRC_H 8
5362 #define SKL_MAX_SRC_H 4096
5363 #define SKL_MIN_DST_W 8
5364 #define SKL_MAX_DST_W 4096
5365 #define SKL_MIN_DST_H 8
5366 #define SKL_MAX_DST_H 4096
5367 #define ICL_MAX_SRC_W 5120
5368 #define ICL_MAX_SRC_H 4096
5369 #define ICL_MAX_DST_W 5120
5370 #define ICL_MAX_DST_H 4096
5371 #define SKL_MIN_YUV_420_SRC_W 16
5372 #define SKL_MIN_YUV_420_SRC_H 16
5373
5374 static int
5375 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5376 unsigned int scaler_user, int *scaler_id,
5377 int src_w, int src_h, int dst_w, int dst_h,
5378 const struct drm_format_info *format, bool need_scaler)
5379 {
5380 struct intel_crtc_scaler_state *scaler_state =
5381 &crtc_state->scaler_state;
5382 struct intel_crtc *intel_crtc =
5383 to_intel_crtc(crtc_state->base.crtc);
5384 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5385 const struct drm_display_mode *adjusted_mode =
5386 &crtc_state->base.adjusted_mode;
5387
5388 /*
5389 * Src coordinates are already rotated by 270 degrees for
5390 * the 90/270 degree plane rotation cases (to match the
5391 * GTT mapping), hence no need to account for rotation here.
5392 */
5393 if (src_w != dst_w || src_h != dst_h)
5394 need_scaler = true;
5395
5396 /*
5397 * Scaling/fitting not supported in IF-ID mode in GEN9+
5398 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5399 * Once NV12 is enabled, handle it here while allocating scaler
5400 * for NV12.
5401 */
5402 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5403 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5404 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5405 return -EINVAL;
5406 }
5407
5408 /*
5409 * If the plane is being disabled, the scaler is no longer required, or a
5410 * detach is forced:
5411 * - free the scaler bound to this plane/crtc
5412 * - in order to do this, update crtc->scaler_usage
5413 *
5414 * Here the scaler state in crtc_state is set free so that the scaler can
5415 * be assigned to another user. The actual register update to free the
5416 * scaler is done in plane/panel-fit programming, so crtc/plane_state->scaler_id is not reset here.
5417 */
5418 if (force_detach || !need_scaler) {
5419 if (*scaler_id >= 0) {
5420 scaler_state->scaler_users &= ~(1 << scaler_user);
5421 scaler_state->scalers[*scaler_id].in_use = 0;
5422
5423 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5424 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5425 intel_crtc->pipe, scaler_user, *scaler_id,
5426 scaler_state->scaler_users);
5427 *scaler_id = -1;
5428 }
5429 return 0;
5430 }
5431
5432 if (format && is_planar_yuv_format(format->format) &&
5433 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5434 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5435 return -EINVAL;
5436 }
5437
5438 /* range checks */
5439 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5440 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5441 (INTEL_GEN(dev_priv) >= 11 &&
5442 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5443 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5444 (INTEL_GEN(dev_priv) < 11 &&
5445 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5446 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5447 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5448 "size is out of scaler range\n",
5449 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5450 return -EINVAL;
5451 }
5452
5453 /* mark this plane as a scaler user in crtc_state */
5454 scaler_state->scaler_users |= (1 << scaler_user);
5455 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5456 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5457 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5458 scaler_state->scaler_users);
5459
5460 return 0;
5461 }
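/*
 * Note on scaler_users: scaler_user is drm_plane_index() for plane
 * scaling and SKL_CRTC_INDEX for pipe scaling (see the two callers
 * below), so every potential user owns exactly one bit. E.g. with the
 * primary plane (index 0) and the pipe both scaling, scaler_users ends
 * up as BIT(0) | BIT(SKL_CRTC_INDEX).
 */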
5462
5463 /**
5464 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5465 *
5466 * @state: crtc state containing the scaler state to update
5467 *
5468 * Return:
5469 * 0 - scaler_usage updated successfully
5470 * error - requested scaling cannot be supported or other error condition
5471 */
5472 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5473 {
5474 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5475 bool need_scaler = false;
5476
5477 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5478 need_scaler = true;
5479
5480 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5481 &state->scaler_state.scaler_id,
5482 state->pipe_src_w, state->pipe_src_h,
5483 adjusted_mode->crtc_hdisplay,
5484 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5485 }
5486
5487 /**
5488 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5489 * @crtc_state: crtc state the plane is attached to
5490 * @plane_state: atomic plane state to update
5491 *
5492 * Return:
5493 * 0 - scaler_usage updated successfully
5494 * error - requested scaling cannot be supported or other error condition
5495 */
5496 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5497 struct intel_plane_state *plane_state)
5498 {
5499 struct intel_plane *intel_plane =
5500 to_intel_plane(plane_state->base.plane);
5501 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5502 struct drm_framebuffer *fb = plane_state->base.fb;
5503 int ret;
5504 bool force_detach = !fb || !plane_state->base.visible;
5505 bool need_scaler = false;
5506
5507 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5508 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5509 fb && is_planar_yuv_format(fb->format->format))
5510 need_scaler = true;
5511
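/* plane_state->base.src is a 16.16 fixed point rect, hence the >> 16 below */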
5512 ret = skl_update_scaler(crtc_state, force_detach,
5513 drm_plane_index(&intel_plane->base),
5514 &plane_state->scaler_id,
5515 drm_rect_width(&plane_state->base.src) >> 16,
5516 drm_rect_height(&plane_state->base.src) >> 16,
5517 drm_rect_width(&plane_state->base.dst),
5518 drm_rect_height(&plane_state->base.dst),
5519 fb ? fb->format : NULL, need_scaler);
5520
5521 if (ret || plane_state->scaler_id < 0)
5522 return ret;
5523
5524 /* check colorkey */
5525 if (plane_state->ckey.flags) {
5526 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
5527 intel_plane->base.base.id,
5528 intel_plane->base.name);
5529 return -EINVAL;
5530 }
5531
5532 /* Check src format */
5533 switch (fb->format->format) {
5534 case DRM_FORMAT_RGB565:
5535 case DRM_FORMAT_XBGR8888:
5536 case DRM_FORMAT_XRGB8888:
5537 case DRM_FORMAT_ABGR8888:
5538 case DRM_FORMAT_ARGB8888:
5539 case DRM_FORMAT_XRGB2101010:
5540 case DRM_FORMAT_XBGR2101010:
5541 case DRM_FORMAT_XBGR16161616F:
5542 case DRM_FORMAT_ABGR16161616F:
5543 case DRM_FORMAT_XRGB16161616F:
5544 case DRM_FORMAT_ARGB16161616F:
5545 case DRM_FORMAT_YUYV:
5546 case DRM_FORMAT_YVYU:
5547 case DRM_FORMAT_UYVY:
5548 case DRM_FORMAT_VYUY:
5549 case DRM_FORMAT_NV12:
5550 case DRM_FORMAT_P010:
5551 case DRM_FORMAT_P012:
5552 case DRM_FORMAT_P016:
5553 case DRM_FORMAT_Y210:
5554 case DRM_FORMAT_Y212:
5555 case DRM_FORMAT_Y216:
5556 case DRM_FORMAT_XVYU2101010:
5557 case DRM_FORMAT_XVYU12_16161616:
5558 case DRM_FORMAT_XVYU16161616:
5559 break;
5560 default:
5561 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5562 intel_plane->base.base.id, intel_plane->base.name,
5563 fb->base.id, fb->format->format);
5564 return -EINVAL;
5565 }
5566
5567 return 0;
5568 }
5569
5570 static void skylake_scaler_disable(struct intel_crtc *crtc)
5571 {
5572 int i;
5573
5574 for (i = 0; i < crtc->num_scalers; i++)
5575 skl_detach_scaler(crtc, i);
5576 }
5577
5578 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5579 {
5580 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5581 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5582 enum pipe pipe = crtc->pipe;
5583 const struct intel_crtc_scaler_state *scaler_state =
5584 &crtc_state->scaler_state;
5585
5586 if (crtc_state->pch_pfit.enabled) {
5587 u16 uv_rgb_hphase, uv_rgb_vphase;
5588 int pfit_w, pfit_h, hscale, vscale;
5589 int id;
5590
5591 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5592 return;
5593
5594 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5595 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5596
5597 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5598 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5599
5600 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5601 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5602
5603 id = scaler_state->scaler_id;
5604 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5605 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5606 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5607 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5608 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5609 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5610 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5611 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5612 }
5613 }
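/*
 * Worked example (illustrative numbers): panel fitting a 1280x720
 * pipe_src into a 1920x1080 pfit window gives
 *
 *   hscale = (1280 << 16) / 1920 = 0xaaaa   (~0.667, src/dst in .16)
 *   vscale = ( 720 << 16) / 1080 = 0xaaaa
 *
 * and those scale factors feed skl_scaler_calc_phase() to derive the
 * initial phases programmed above.
 */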
5614
5615 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5616 {
5617 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5618 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5619 int pipe = crtc->pipe;
5620
5621 if (crtc_state->pch_pfit.enabled) {
5622 /* Force use of hard-coded filter coefficients
5623 * as some pre-programmed values are broken,
5624 * e.g. x201.
5625 */
5626 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5627 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5628 PF_PIPE_SEL_IVB(pipe));
5629 else
5630 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5631 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5632 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5633 }
5634 }
5635
5636 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5637 {
5638 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5639 struct drm_device *dev = crtc->base.dev;
5640 struct drm_i915_private *dev_priv = to_i915(dev);
5641
5642 if (!crtc_state->ips_enabled)
5643 return;
5644
5645 /*
5646 * We can only enable IPS after we enable a plane and wait for a vblank
5647 * This function is called from post_plane_update, which is run after
5648 * a vblank wait.
5649 */
5650 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5651
5652 if (IS_BROADWELL(dev_priv)) {
5653 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5654 IPS_ENABLE | IPS_PCODE_CONTROL));
5655 /* Quoting Art Runyan: "it's not safe to expect any particular
5656 * value in IPS_CTL bit 31 after enabling IPS through the
5657 * mailbox." Moreover, the mailbox may return a bogus state,
5658 * so we need to just enable it and continue on.
5659 */
5660 } else {
5661 I915_WRITE(IPS_CTL, IPS_ENABLE);
5662 /* The bit only becomes 1 in the next vblank, so this wait here
5663 * is essentially intel_wait_for_vblank. If we don't have this
5664 * and don't wait for vblanks until the end of crtc_enable, then
5665 * the HW state readout code will complain that the expected
5666 * IPS_CTL value is not the one we read. */
5667 if (intel_wait_for_register(&dev_priv->uncore,
5668 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5669 50))
5670 DRM_ERROR("Timed out waiting for IPS enable\n");
5671 }
5672 }
5673
5674 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5675 {
5676 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5677 struct drm_device *dev = crtc->base.dev;
5678 struct drm_i915_private *dev_priv = to_i915(dev);
5679
5680 if (!crtc_state->ips_enabled)
5681 return;
5682
5683 if (IS_BROADWELL(dev_priv)) {
5684 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5685 /*
5686 * Wait for PCODE to finish disabling IPS. The BSpec specified
5687 * 42ms timeout value leads to occasional timeouts so use 100ms
5688 * instead.
5689 */
5690 if (intel_wait_for_register(&dev_priv->uncore,
5691 IPS_CTL, IPS_ENABLE, 0,
5692 100))
5693 DRM_ERROR("Timed out waiting for IPS disable\n");
5694 } else {
5695 I915_WRITE(IPS_CTL, 0);
5696 POSTING_READ(IPS_CTL);
5697 }
5698
5699 /* We need to wait for a vblank before we can disable the plane. */
5700 intel_wait_for_vblank(dev_priv, crtc->pipe);
5701 }
5702
5703 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5704 {
5705 if (intel_crtc->overlay) {
5706 struct drm_device *dev = intel_crtc->base.dev;
5707
5708 mutex_lock(&dev->struct_mutex);
5709 (void) intel_overlay_switch_off(intel_crtc->overlay);
5710 mutex_unlock(&dev->struct_mutex);
5711 }
5712
5713 /* Let userspace switch the overlay on again. In most cases userspace
5714 * has to recompute where to put it anyway.
5715 */
5716 }
5717
5718 /**
5719 * intel_post_enable_primary - Perform operations after enabling primary plane
5720 * @crtc: the CRTC whose primary plane was just enabled
5721 * @new_crtc_state: the enabling state
5722 *
5723 * Performs potentially sleeping operations that must be done after the primary
5724 * plane is enabled, such as updating FBC and IPS. Note that this may be
5725 * called due to an explicit primary plane update, or due to an implicit
5726 * re-enable that is caused when a sprite plane is updated to no longer
5727 * completely hide the primary plane.
5728 */
5729 static void
5730 intel_post_enable_primary(struct drm_crtc *crtc,
5731 const struct intel_crtc_state *new_crtc_state)
5732 {
5733 struct drm_device *dev = crtc->dev;
5734 struct drm_i915_private *dev_priv = to_i915(dev);
5735 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5736 int pipe = intel_crtc->pipe;
5737
5738 /*
5739 * Gen2 reports pipe underruns whenever all planes are disabled.
5740 * So don't enable underrun reporting before at least some planes
5741 * are enabled.
5742 * FIXME: Need to fix the logic to work when we turn off all planes
5743 * but leave the pipe running.
5744 */
5745 if (IS_GEN(dev_priv, 2))
5746 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5747
5748 /* Underruns don't always raise interrupts, so check manually. */
5749 intel_check_cpu_fifo_underruns(dev_priv);
5750 intel_check_pch_fifo_underruns(dev_priv);
5751 }
5752
5753 /* FIXME get rid of this and use pre_plane_update */
5754 static void
5755 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5756 {
5757 struct drm_device *dev = crtc->dev;
5758 struct drm_i915_private *dev_priv = to_i915(dev);
5759 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5760 int pipe = intel_crtc->pipe;
5761
5762 /*
5763 * Gen2 reports pipe underruns whenever all planes are disabled.
5764 * So disable underrun reporting before all the planes get disabled.
5765 */
5766 if (IS_GEN(dev_priv, 2))
5767 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5768
5769 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5770
5771 /*
5772 * Vblank time updates from the shadow to live plane control register
5773 * are blocked if the memory self-refresh mode is active at that
5774 * moment. So to make sure the plane gets truly disabled, disable
5775 * first the self-refresh mode. The self-refresh enable bit in turn
5776 * will be checked/applied by the HW only at the next frame start
5777 * event which is after the vblank start event, so we need to have a
5778 * wait-for-vblank between disabling the plane and the pipe.
5779 */
5780 if (HAS_GMCH(dev_priv) &&
5781 intel_set_memory_cxsr(dev_priv, false))
5782 intel_wait_for_vblank(dev_priv, pipe);
5783 }
5784
5785 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5786 const struct intel_crtc_state *new_crtc_state)
5787 {
5788 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5789 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5790
5791 if (!old_crtc_state->ips_enabled)
5792 return false;
5793
5794 if (needs_modeset(&new_crtc_state->base))
5795 return true;
5796
5797 /*
5798 * Workaround: Do not read or write the pipe palette/gamma data while
5799 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5800 *
5801 * Disable IPS before we program the LUT.
5802 */
5803 if (IS_HASWELL(dev_priv) &&
5804 (new_crtc_state->base.color_mgmt_changed ||
5805 new_crtc_state->update_pipe) &&
5806 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5807 return true;
5808
5809 return !new_crtc_state->ips_enabled;
5810 }
5811
5812 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5813 const struct intel_crtc_state *new_crtc_state)
5814 {
5815 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5816 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5817
5818 if (!new_crtc_state->ips_enabled)
5819 return false;
5820
5821 if (needs_modeset(&new_crtc_state->base))
5822 return true;
5823
5824 /*
5825 * Workaround: Do not read or write the pipe palette/gamma data while
5826 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5827 *
5828 * Re-enable IPS after the LUT has been programmed.
5829 */
5830 if (IS_HASWELL(dev_priv) &&
5831 (new_crtc_state->base.color_mgmt_changed ||
5832 new_crtc_state->update_pipe) &&
5833 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5834 return true;
5835
5836 /*
5837 * We can't read out IPS on broadwell, assume the worst and
5838 * forcibly enable IPS on the first fastset.
5839 */
5840 if (new_crtc_state->update_pipe &&
5841 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5842 return true;
5843
5844 return !old_crtc_state->ips_enabled;
5845 }
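/*
 * The two predicates above gate a simple protocol around plane/LUT
 * updates; a sketch of the ordering (implemented by
 * intel_pre_plane_update()/intel_post_plane_update() below):
 *
 *   intel_pre_plane_update():  hsw_pre_update_disable_ips() -> hsw_disable_ips()
 *   ... program planes / LUT ...
 *   intel_post_plane_update(): hsw_post_update_enable_ips() -> hsw_enable_ips()
 *
 * which guarantees IPS is off while a split-gamma LUT is written on HSW.
 */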
5846
5847 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5848 const struct intel_crtc_state *crtc_state)
5849 {
5850 if (!crtc_state->nv12_planes)
5851 return false;
5852
5853 /* WA Display #0827: Gen9:all */
5854 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5855 return true;
5856
5857 return false;
5858 }
5859
5860 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5861 const struct intel_crtc_state *crtc_state)
5862 {
5863 /* Wa_2006604312:icl */
5864 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5865 return true;
5866
5867 return false;
5868 }
5869
5870 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5871 {
5872 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5873 struct drm_device *dev = crtc->base.dev;
5874 struct drm_i915_private *dev_priv = to_i915(dev);
5875 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5876 struct intel_crtc_state *pipe_config =
5877 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5878 crtc);
5879 struct drm_plane *primary = crtc->base.primary;
5880 struct drm_plane_state *old_primary_state =
5881 drm_atomic_get_old_plane_state(old_state, primary);
5882
5883 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5884
5885 if (pipe_config->update_wm_post && pipe_config->base.active)
5886 intel_update_watermarks(crtc);
5887
5888 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5889 hsw_enable_ips(pipe_config);
5890
5891 if (old_primary_state) {
5892 struct drm_plane_state *new_primary_state =
5893 drm_atomic_get_new_plane_state(old_state, primary);
5894
5895 intel_fbc_post_update(crtc);
5896
5897 if (new_primary_state->visible &&
5898 (needs_modeset(&pipe_config->base) ||
5899 !old_primary_state->visible))
5900 intel_post_enable_primary(&crtc->base, pipe_config);
5901 }
5902
5903 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5904 !needs_nv12_wa(dev_priv, pipe_config))
5905 skl_wa_827(dev_priv, crtc->pipe, false);
5906
5907 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5908 !needs_scalerclk_wa(dev_priv, pipe_config))
5909 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5910 }
5911
5912 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5913 struct intel_crtc_state *pipe_config)
5914 {
5915 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5916 struct drm_device *dev = crtc->base.dev;
5917 struct drm_i915_private *dev_priv = to_i915(dev);
5918 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5919 struct drm_plane *primary = crtc->base.primary;
5920 struct drm_plane_state *old_primary_state =
5921 drm_atomic_get_old_plane_state(old_state, primary);
5922 bool modeset = needs_modeset(&pipe_config->base);
5923 struct intel_atomic_state *old_intel_state =
5924 to_intel_atomic_state(old_state);
5925
5926 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5927 hsw_disable_ips(old_crtc_state);
5928
5929 if (old_primary_state) {
5930 struct intel_plane_state *new_primary_state =
5931 intel_atomic_get_new_plane_state(old_intel_state,
5932 to_intel_plane(primary));
5933
5934 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5935 /*
5936 * Gen2 reports pipe underruns whenever all planes are disabled.
5937 * So disable underrun reporting before all the planes get disabled.
5938 */
5939 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5940 (modeset || !new_primary_state->base.visible))
5941 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5942 }
5943
5944 /* Display WA 827 */
5945 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5946 needs_nv12_wa(dev_priv, pipe_config))
5947 skl_wa_827(dev_priv, crtc->pipe, true);
5948
5949 /* Wa_2006604312:icl */
5950 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5951 needs_scalerclk_wa(dev_priv, pipe_config))
5952 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
5953
5954 /*
5955 * Vblank time updates from the shadow to live plane control register
5956 * are blocked if the memory self-refresh mode is active at that
5957 * moment. So to make sure the plane gets truly disabled, disable
5958 * first the self-refresh mode. The self-refresh enable bit in turn
5959 * will be checked/applied by the HW only at the next frame start
5960 * event which is after the vblank start event, so we need to have a
5961 * wait-for-vblank between disabling the plane and the pipe.
5962 */
5963 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5964 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5965 intel_wait_for_vblank(dev_priv, crtc->pipe);
5966
5967 /*
5968 * IVB workaround: must disable low power watermarks for at least
5969 * one frame before enabling scaling. LP watermarks can be re-enabled
5970 * when scaling is disabled.
5971 *
5972 * WaCxSRDisabledForSpriteScaling:ivb
5973 */
5974 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5975 old_crtc_state->base.active)
5976 intel_wait_for_vblank(dev_priv, crtc->pipe);
5977
5978 /*
5979 * If we're doing a modeset, we're done. No need to do any pre-vblank
5980 * watermark programming here.
5981 */
5982 if (needs_modeset(&pipe_config->base))
5983 return;
5984
5985 /*
5986 * For platforms that support atomic watermarks, program the
5987 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5988 * will be the intermediate values that are safe for both pre- and
5989 * post- vblank; when vblank happens, the 'active' values will be set
5990 * to the final 'target' values and we'll do this again to get the
5991 * optimal watermarks. For gen9+ platforms, the values we program here
5992 * will be the final target values which will get automatically latched
5993 * at vblank time; no further programming will be necessary.
5994 *
5995 * If a platform hasn't been transitioned to atomic watermarks yet,
5996 * we'll continue to update watermarks the old way, if flags tell
5997 * us to.
5998 */
5999 if (dev_priv->display.initial_watermarks != NULL)
6000 dev_priv->display.initial_watermarks(old_intel_state,
6001 pipe_config);
6002 else if (pipe_config->update_wm_pre)
6003 intel_update_watermarks(crtc);
6004 }
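/*
 * Illustrative timeline for the two-phase watermark scheme described in
 * the comment above (pre-gen9 platforms with atomic watermarks):
 *
 *   intel_pre_plane_update() -> initial_watermarks() programs the
 *                               'intermediate' values, safe for both the
 *                               old and the new plane configuration
 *   vblank                   -> the new plane state takes effect
 *   later in the commit      -> the final 'optimal' values are programmed
 *                               (legacy platforms instead use
 *                               intel_update_watermarks() via update_wm_pre/post)
 */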
6005
6006 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6007 struct intel_crtc *crtc)
6008 {
6009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6010 const struct intel_crtc_state *new_crtc_state =
6011 intel_atomic_get_new_crtc_state(state, crtc);
6012 unsigned int update_mask = new_crtc_state->update_planes;
6013 const struct intel_plane_state *old_plane_state;
6014 struct intel_plane *plane;
6015 unsigned fb_bits = 0;
6016 int i;
6017
6018 intel_crtc_dpms_overlay_disable(crtc);
6019
6020 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6021 if (crtc->pipe != plane->pipe ||
6022 !(update_mask & BIT(plane->id)))
6023 continue;
6024
6025 intel_disable_plane(plane, new_crtc_state);
6026
6027 if (old_plane_state->base.visible)
6028 fb_bits |= plane->frontbuffer_bit;
6029 }
6030
6031 intel_frontbuffer_flip(dev_priv, fb_bits);
6032 }
6033
6034 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
6035 struct intel_crtc_state *crtc_state,
6036 struct drm_atomic_state *old_state)
6037 {
6038 struct drm_connector_state *conn_state;
6039 struct drm_connector *conn;
6040 int i;
6041
6042 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6043 struct intel_encoder *encoder =
6044 to_intel_encoder(conn_state->best_encoder);
6045
6046 if (conn_state->crtc != crtc)
6047 continue;
6048
6049 if (encoder->pre_pll_enable)
6050 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6051 }
6052 }
6053
6054 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
6055 struct intel_crtc_state *crtc_state,
6056 struct drm_atomic_state *old_state)
6057 {
6058 struct drm_connector_state *conn_state;
6059 struct drm_connector *conn;
6060 int i;
6061
6062 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6063 struct intel_encoder *encoder =
6064 to_intel_encoder(conn_state->best_encoder);
6065
6066 if (conn_state->crtc != crtc)
6067 continue;
6068
6069 if (encoder->pre_enable)
6070 encoder->pre_enable(encoder, crtc_state, conn_state);
6071 }
6072 }
6073
6074 static void intel_encoders_enable(struct drm_crtc *crtc,
6075 struct intel_crtc_state *crtc_state,
6076 struct drm_atomic_state *old_state)
6077 {
6078 struct drm_connector_state *conn_state;
6079 struct drm_connector *conn;
6080 int i;
6081
6082 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6083 struct intel_encoder *encoder =
6084 to_intel_encoder(conn_state->best_encoder);
6085
6086 if (conn_state->crtc != crtc)
6087 continue;
6088
6089 if (encoder->enable)
6090 encoder->enable(encoder, crtc_state, conn_state);
6091 intel_opregion_notify_encoder(encoder, true);
6092 }
6093 }
6094
6095 static void intel_encoders_disable(struct drm_crtc *crtc,
6096 struct intel_crtc_state *old_crtc_state,
6097 struct drm_atomic_state *old_state)
6098 {
6099 struct drm_connector_state *old_conn_state;
6100 struct drm_connector *conn;
6101 int i;
6102
6103 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6104 struct intel_encoder *encoder =
6105 to_intel_encoder(old_conn_state->best_encoder);
6106
6107 if (old_conn_state->crtc != crtc)
6108 continue;
6109
6110 intel_opregion_notify_encoder(encoder, false);
6111 if (encoder->disable)
6112 encoder->disable(encoder, old_crtc_state, old_conn_state);
6113 }
6114 }
6115
6116 static void intel_encoders_post_disable(struct drm_crtc *crtc,
6117 struct intel_crtc_state *old_crtc_state,
6118 struct drm_atomic_state *old_state)
6119 {
6120 struct drm_connector_state *old_conn_state;
6121 struct drm_connector *conn;
6122 int i;
6123
6124 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6125 struct intel_encoder *encoder =
6126 to_intel_encoder(old_conn_state->best_encoder);
6127
6128 if (old_conn_state->crtc != crtc)
6129 continue;
6130
6131 if (encoder->post_disable)
6132 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6133 }
6134 }
6135
6136 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
6137 struct intel_crtc_state *old_crtc_state,
6138 struct drm_atomic_state *old_state)
6139 {
6140 struct drm_connector_state *old_conn_state;
6141 struct drm_connector *conn;
6142 int i;
6143
6144 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6145 struct intel_encoder *encoder =
6146 to_intel_encoder(old_conn_state->best_encoder);
6147
6148 if (old_conn_state->crtc != crtc)
6149 continue;
6150
6151 if (encoder->post_pll_disable)
6152 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6153 }
6154 }
6155
6156 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
6157 struct intel_crtc_state *crtc_state,
6158 struct drm_atomic_state *old_state)
6159 {
6160 struct drm_connector_state *conn_state;
6161 struct drm_connector *conn;
6162 int i;
6163
6164 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6165 struct intel_encoder *encoder =
6166 to_intel_encoder(conn_state->best_encoder);
6167
6168 if (conn_state->crtc != crtc)
6169 continue;
6170
6171 if (encoder->update_pipe)
6172 encoder->update_pipe(encoder, crtc_state, conn_state);
6173 }
6174 }
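/*
 * Taken together, the helpers above call the optional encoder hooks in a
 * fixed order around a modeset (a summary of existing behaviour):
 *
 *   enable:  ->pre_pll_enable, ->pre_enable, ->enable
 *   disable: ->disable, ->post_disable, ->post_pll_disable
 *
 * ->update_pipe is used instead of the enable hooks for fastsets that
 * don't require a full modeset.
 */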
6175
6176 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6177 {
6178 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6179 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6180
6181 plane->disable_plane(plane, crtc_state);
6182 }
6183
6184 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6185 struct drm_atomic_state *old_state)
6186 {
6187 struct drm_crtc *crtc = pipe_config->base.crtc;
6188 struct drm_device *dev = crtc->dev;
6189 struct drm_i915_private *dev_priv = to_i915(dev);
6190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6191 int pipe = intel_crtc->pipe;
6192 struct intel_atomic_state *old_intel_state =
6193 to_intel_atomic_state(old_state);
6194
6195 if (WARN_ON(intel_crtc->active))
6196 return;
6197
6198 /*
6199 * Sometimes spurious CPU pipe underruns happen during FDI
6200 * training, at least with VGA+HDMI cloning. Suppress them.
6201 *
6202 * On ILK we get occasional spurious CPU pipe underruns
6203 * between eDP port A enable and vdd enable. Also PCH port
6204 * enable seems to result in the occasional CPU pipe underrun.
6205 *
6206 * Spurious PCH underruns also occur during PCH enabling.
6207 */
6208 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6209 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6210
6211 if (pipe_config->has_pch_encoder)
6212 intel_prepare_shared_dpll(pipe_config);
6213
6214 if (intel_crtc_has_dp_encoder(pipe_config))
6215 intel_dp_set_m_n(pipe_config, M1_N1);
6216
6217 intel_set_pipe_timings(pipe_config);
6218 intel_set_pipe_src_size(pipe_config);
6219
6220 if (pipe_config->has_pch_encoder) {
6221 intel_cpu_transcoder_set_m_n(pipe_config,
6222 &pipe_config->fdi_m_n, NULL);
6223 }
6224
6225 ironlake_set_pipeconf(pipe_config);
6226
6227 intel_crtc->active = true;
6228
6229 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6230
6231 if (pipe_config->has_pch_encoder) {
6232 /* Note: FDI PLL enabling _must_ be done before we enable the
6233 * cpu pipes, hence this is separate from all the other fdi/pch
6234 * enabling. */
6235 ironlake_fdi_pll_enable(pipe_config);
6236 } else {
6237 assert_fdi_tx_disabled(dev_priv, pipe);
6238 assert_fdi_rx_disabled(dev_priv, pipe);
6239 }
6240
6241 ironlake_pfit_enable(pipe_config);
6242
6243 /*
6244 * On ILK+ LUT must be loaded before the pipe is running but with
6245 * clocks enabled
6246 */
6247 intel_color_load_luts(pipe_config);
6248 intel_color_commit(pipe_config);
6249 /* update DSPCNTR to configure gamma for pipe bottom color */
6250 intel_disable_primary_plane(pipe_config);
6251
6252 if (dev_priv->display.initial_watermarks != NULL)
6253 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6254 intel_enable_pipe(pipe_config);
6255
6256 if (pipe_config->has_pch_encoder)
6257 ironlake_pch_enable(old_intel_state, pipe_config);
6258
6259 assert_vblank_disabled(crtc);
6260 intel_crtc_vblank_on(pipe_config);
6261
6262 intel_encoders_enable(crtc, pipe_config, old_state);
6263
6264 if (HAS_PCH_CPT(dev_priv))
6265 cpt_verify_modeset(dev, intel_crtc->pipe);
6266
6267 /*
6268 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6269 * And a second vblank wait is needed at least on ILK with
6270 * some interlaced HDMI modes. Let's do the double wait always
6271 * in case there are more corner cases we don't know about.
6272 */
6273 if (pipe_config->has_pch_encoder) {
6274 intel_wait_for_vblank(dev_priv, pipe);
6275 intel_wait_for_vblank(dev_priv, pipe);
6276 }
6277 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6278 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6279 }
6280
6281 /* IPS only exists on ULT machines and is tied to pipe A. */
6282 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6283 {
6284 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6285 }
6286
6287 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6288 enum pipe pipe, bool apply)
6289 {
6290 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6291 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6292
6293 if (apply)
6294 val |= mask;
6295 else
6296 val &= ~mask;
6297
6298 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6299 }
6300
6301 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6302 {
6303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6304 enum pipe pipe = crtc->pipe;
6305 u32 val;
6306
6307 val = MBUS_DBOX_A_CREDIT(2);
6308 val |= MBUS_DBOX_BW_CREDIT(1);
6309 val |= MBUS_DBOX_B_CREDIT(8);
6310
6311 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6312 }
6313
6314 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6315 struct drm_atomic_state *old_state)
6316 {
6317 struct drm_crtc *crtc = pipe_config->base.crtc;
6318 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6320 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6321 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6322 struct intel_atomic_state *old_intel_state =
6323 to_intel_atomic_state(old_state);
6324 bool psl_clkgate_wa;
6325
6326 if (WARN_ON(intel_crtc->active))
6327 return;
6328
6329 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6330
6331 if (pipe_config->shared_dpll)
6332 intel_enable_shared_dpll(pipe_config);
6333
6334 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6335
6336 if (intel_crtc_has_dp_encoder(pipe_config))
6337 intel_dp_set_m_n(pipe_config, M1_N1);
6338
6339 if (!transcoder_is_dsi(cpu_transcoder))
6340 intel_set_pipe_timings(pipe_config);
6341
6342 intel_set_pipe_src_size(pipe_config);
6343
6344 if (cpu_transcoder != TRANSCODER_EDP &&
6345 !transcoder_is_dsi(cpu_transcoder)) {
6346 I915_WRITE(PIPE_MULT(cpu_transcoder),
6347 pipe_config->pixel_multiplier - 1);
6348 }
6349
6350 if (pipe_config->has_pch_encoder) {
6351 intel_cpu_transcoder_set_m_n(pipe_config,
6352 &pipe_config->fdi_m_n, NULL);
6353 }
6354
6355 if (!transcoder_is_dsi(cpu_transcoder))
6356 haswell_set_pipeconf(pipe_config);
6357
6358 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6359 bdw_set_pipemisc(pipe_config);
6360
6361 intel_crtc->active = true;
6362
6363 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6364 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6365 pipe_config->pch_pfit.enabled;
6366 if (psl_clkgate_wa)
6367 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6368
6369 if (INTEL_GEN(dev_priv) >= 9)
6370 skylake_pfit_enable(pipe_config);
6371 else
6372 ironlake_pfit_enable(pipe_config);
6373
6374 /*
6375 * On ILK+ LUT must be loaded before the pipe is running but with
6376 * clocks enabled
6377 */
6378 intel_color_load_luts(pipe_config);
6379 intel_color_commit(pipe_config);
6380 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6381 if (INTEL_GEN(dev_priv) < 9)
6382 intel_disable_primary_plane(pipe_config);
6383
6384 if (INTEL_GEN(dev_priv) >= 11)
6385 icl_set_pipe_chicken(intel_crtc);
6386
6387 intel_ddi_set_pipe_settings(pipe_config);
6388 if (!transcoder_is_dsi(cpu_transcoder))
6389 intel_ddi_enable_transcoder_func(pipe_config);
6390
6391 if (dev_priv->display.initial_watermarks != NULL)
6392 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6393
6394 if (INTEL_GEN(dev_priv) >= 11)
6395 icl_pipe_mbus_enable(intel_crtc);
6396
6397 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6398 if (!transcoder_is_dsi(cpu_transcoder))
6399 intel_enable_pipe(pipe_config);
6400
6401 if (pipe_config->has_pch_encoder)
6402 lpt_pch_enable(old_intel_state, pipe_config);
6403
6404 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6405 intel_ddi_set_vc_payload_alloc(pipe_config, true);
6406
6407 assert_vblank_disabled(crtc);
6408 intel_crtc_vblank_on(pipe_config);
6409
6410 intel_encoders_enable(crtc, pipe_config, old_state);
6411
6412 if (psl_clkgate_wa) {
6413 intel_wait_for_vblank(dev_priv, pipe);
6414 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6415 }
6416
6417 /* If we change the relative order between pipe/planes enabling, we need
6418 * to change the workaround. */
6419 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6420 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6421 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6422 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6423 }
6424 }
6425
6426 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6427 {
6428 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6429 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6430 enum pipe pipe = crtc->pipe;
6431
6432 /* To avoid upsetting the power well on haswell, only disable the pfit if
6433 * it's in use. The hw state code will make sure we get this right. */
6434 if (old_crtc_state->pch_pfit.enabled) {
6435 I915_WRITE(PF_CTL(pipe), 0);
6436 I915_WRITE(PF_WIN_POS(pipe), 0);
6437 I915_WRITE(PF_WIN_SZ(pipe), 0);
6438 }
6439 }
6440
6441 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6442 struct drm_atomic_state *old_state)
6443 {
6444 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6445 struct drm_device *dev = crtc->dev;
6446 struct drm_i915_private *dev_priv = to_i915(dev);
6447 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6448 int pipe = intel_crtc->pipe;
6449
6450 /*
6451 * Sometimes spurious CPU pipe underruns happen when the
6452 * pipe is already disabled, but FDI RX/TX is still enabled.
6453 * Happens at least with VGA+HDMI cloning. Suppress them.
6454 */
6455 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6456 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6457
6458 intel_encoders_disable(crtc, old_crtc_state, old_state);
6459
6460 drm_crtc_vblank_off(crtc);
6461 assert_vblank_disabled(crtc);
6462
6463 intel_disable_pipe(old_crtc_state);
6464
6465 ironlake_pfit_disable(old_crtc_state);
6466
6467 if (old_crtc_state->has_pch_encoder)
6468 ironlake_fdi_disable(crtc);
6469
6470 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6471
6472 if (old_crtc_state->has_pch_encoder) {
6473 ironlake_disable_pch_transcoder(dev_priv, pipe);
6474
6475 if (HAS_PCH_CPT(dev_priv)) {
6476 i915_reg_t reg;
6477 u32 temp;
6478
6479 /* disable TRANS_DP_CTL */
6480 reg = TRANS_DP_CTL(pipe);
6481 temp = I915_READ(reg);
6482 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6483 TRANS_DP_PORT_SEL_MASK);
6484 temp |= TRANS_DP_PORT_SEL_NONE;
6485 I915_WRITE(reg, temp);
6486
6487 /* disable DPLL_SEL */
6488 temp = I915_READ(PCH_DPLL_SEL);
6489 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6490 I915_WRITE(PCH_DPLL_SEL, temp);
6491 }
6492
6493 ironlake_fdi_pll_disable(intel_crtc);
6494 }
6495
6496 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6497 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6498 }
6499
6500 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6501 struct drm_atomic_state *old_state)
6502 {
6503 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6504 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6505 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6506 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6507
6508 intel_encoders_disable(crtc, old_crtc_state, old_state);
6509
6510 drm_crtc_vblank_off(crtc);
6511 assert_vblank_disabled(crtc);
6512
6513 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6514 if (!transcoder_is_dsi(cpu_transcoder))
6515 intel_disable_pipe(old_crtc_state);
6516
6517 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6518 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6519
6520 if (!transcoder_is_dsi(cpu_transcoder))
6521 intel_ddi_disable_transcoder_func(old_crtc_state);
6522
6523 intel_dsc_disable(old_crtc_state);
6524
6525 if (INTEL_GEN(dev_priv) >= 9)
6526 skylake_scaler_disable(intel_crtc);
6527 else
6528 ironlake_pfit_disable(old_crtc_state);
6529
6530 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6531
6532 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6533 }
6534
6535 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6536 {
6537 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6538 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6539
6540 if (!crtc_state->gmch_pfit.control)
6541 return;
6542
6543 /*
6544 * The panel fitter should only be adjusted whilst the pipe is disabled,
6545 * according to register description and PRM.
6546 */
6547 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6548 assert_pipe_disabled(dev_priv, crtc->pipe);
6549
6550 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6551 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6552
6553 /* Border color in case we don't scale up to the full screen. Black by
6554 * default, change to something else for debugging. */
6555 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6556 }
6557
6558 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6559 {
6560 if (port == PORT_NONE)
6561 return false;
6562
6563 if (IS_ELKHARTLAKE(dev_priv))
6564 return port <= PORT_C;
6565
6566 if (INTEL_GEN(dev_priv) >= 11)
6567 return port <= PORT_B;
6568
6569 return false;
6570 }
6571
6572 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6573 {
6574 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6575 return port >= PORT_C && port <= PORT_F;
6576
6577 return false;
6578 }
6579
6580 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6581 {
6582 if (!intel_port_is_tc(dev_priv, port))
6583 return PORT_TC_NONE;
6584
6585 return port - PORT_C;
6586 }
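/*
 * Example (follows directly from the arithmetic above): on ICL,
 * PORT_C..PORT_F map to TC ports 0..3, e.g. PORT_D -> 1.
 */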
6587
6588 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6589 {
6590 switch (port) {
6591 case PORT_A:
6592 return POWER_DOMAIN_PORT_DDI_A_LANES;
6593 case PORT_B:
6594 return POWER_DOMAIN_PORT_DDI_B_LANES;
6595 case PORT_C:
6596 return POWER_DOMAIN_PORT_DDI_C_LANES;
6597 case PORT_D:
6598 return POWER_DOMAIN_PORT_DDI_D_LANES;
6599 case PORT_E:
6600 return POWER_DOMAIN_PORT_DDI_E_LANES;
6601 case PORT_F:
6602 return POWER_DOMAIN_PORT_DDI_F_LANES;
6603 default:
6604 MISSING_CASE(port);
6605 return POWER_DOMAIN_PORT_OTHER;
6606 }
6607 }
6608
6609 enum intel_display_power_domain
6610 intel_aux_power_domain(struct intel_digital_port *dig_port)
6611 {
6612 switch (dig_port->aux_ch) {
6613 case AUX_CH_A:
6614 return POWER_DOMAIN_AUX_A;
6615 case AUX_CH_B:
6616 return POWER_DOMAIN_AUX_B;
6617 case AUX_CH_C:
6618 return POWER_DOMAIN_AUX_C;
6619 case AUX_CH_D:
6620 return POWER_DOMAIN_AUX_D;
6621 case AUX_CH_E:
6622 return POWER_DOMAIN_AUX_E;
6623 case AUX_CH_F:
6624 return POWER_DOMAIN_AUX_F;
6625 default:
6626 MISSING_CASE(dig_port->aux_ch);
6627 return POWER_DOMAIN_AUX_A;
6628 }
6629 }
6630
6631 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6632 struct intel_crtc_state *crtc_state)
6633 {
6634 struct drm_device *dev = crtc->dev;
6635 struct drm_i915_private *dev_priv = to_i915(dev);
6636 struct drm_encoder *encoder;
6637 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6638 enum pipe pipe = intel_crtc->pipe;
6639 u64 mask;
6640 enum transcoder transcoder = crtc_state->cpu_transcoder;
6641
6642 if (!crtc_state->base.active)
6643 return 0;
6644
6645 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6646 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6647 if (crtc_state->pch_pfit.enabled ||
6648 crtc_state->pch_pfit.force_thru)
6649 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6650
6651 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6652 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6653
6654 mask |= BIT_ULL(intel_encoder->power_domain);
6655 }
6656
6657 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6658 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6659
6660 if (crtc_state->shared_dpll)
6661 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6662
6663 return mask;
6664 }
6665
6666 static u64
6667 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6668 struct intel_crtc_state *crtc_state)
6669 {
6670 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6671 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6672 enum intel_display_power_domain domain;
6673 u64 domains, new_domains, old_domains;
6674
6675 old_domains = intel_crtc->enabled_power_domains;
6676 intel_crtc->enabled_power_domains = new_domains =
6677 get_crtc_power_domains(crtc, crtc_state);
6678
6679 domains = new_domains & ~old_domains;
6680
6681 for_each_power_domain(domain, domains)
6682 intel_display_power_get(dev_priv, domain);
6683
6684 return old_domains & ~new_domains;
6685 }
6686
6687 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6688 u64 domains)
6689 {
6690 enum intel_display_power_domain domain;
6691
6692 for_each_power_domain(domain, domains)
6693 intel_display_power_put_unchecked(dev_priv, domain);
6694 }
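/*
 * Worked example for the get/put dance above (hypothetical domain sets):
 * if old_domains = PIPE_A | AUX_A and the new state needs
 * new_domains = PIPE_A | AUDIO, then modeset_get_crtc_power_domains()
 * grabs only AUDIO (new & ~old) and returns AUX_A (old & ~new), which the
 * caller later hands to modeset_put_power_domains() once the modeset is
 * done, so PIPE_A stays referenced throughout.
 */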
6695
6696 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6697 struct drm_atomic_state *old_state)
6698 {
6699 struct intel_atomic_state *old_intel_state =
6700 to_intel_atomic_state(old_state);
6701 struct drm_crtc *crtc = pipe_config->base.crtc;
6702 struct drm_device *dev = crtc->dev;
6703 struct drm_i915_private *dev_priv = to_i915(dev);
6704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6705 int pipe = intel_crtc->pipe;
6706
6707 if (WARN_ON(intel_crtc->active))
6708 return;
6709
6710 if (intel_crtc_has_dp_encoder(pipe_config))
6711 intel_dp_set_m_n(pipe_config, M1_N1);
6712
6713 intel_set_pipe_timings(pipe_config);
6714 intel_set_pipe_src_size(pipe_config);
6715
6716 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6717 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6718 I915_WRITE(CHV_CANVAS(pipe), 0);
6719 }
6720
6721 i9xx_set_pipeconf(pipe_config);
6722
6723 intel_crtc->active = true;
6724
6725 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6726
6727 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6728
6729 if (IS_CHERRYVIEW(dev_priv)) {
6730 chv_prepare_pll(intel_crtc, pipe_config);
6731 chv_enable_pll(intel_crtc, pipe_config);
6732 } else {
6733 vlv_prepare_pll(intel_crtc, pipe_config);
6734 vlv_enable_pll(intel_crtc, pipe_config);
6735 }
6736
6737 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6738
6739 i9xx_pfit_enable(pipe_config);
6740
6741 intel_color_load_luts(pipe_config);
6742 intel_color_commit(pipe_config);
6743 /* update DSPCNTR to configure gamma for pipe bottom color */
6744 intel_disable_primary_plane(pipe_config);
6745
6746 dev_priv->display.initial_watermarks(old_intel_state,
6747 pipe_config);
6748 intel_enable_pipe(pipe_config);
6749
6750 assert_vblank_disabled(crtc);
6751 intel_crtc_vblank_on(pipe_config);
6752
6753 intel_encoders_enable(crtc, pipe_config, old_state);
6754 }
6755
6756 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6757 {
6758 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6759 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6760
6761 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6762 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6763 }
6764
6765 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6766 struct drm_atomic_state *old_state)
6767 {
6768 struct intel_atomic_state *old_intel_state =
6769 to_intel_atomic_state(old_state);
6770 struct drm_crtc *crtc = pipe_config->base.crtc;
6771 struct drm_device *dev = crtc->dev;
6772 struct drm_i915_private *dev_priv = to_i915(dev);
6773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6774 enum pipe pipe = intel_crtc->pipe;
6775
6776 if (WARN_ON(intel_crtc->active))
6777 return;
6778
6779 i9xx_set_pll_dividers(pipe_config);
6780
6781 if (intel_crtc_has_dp_encoder(pipe_config))
6782 intel_dp_set_m_n(pipe_config, M1_N1);
6783
6784 intel_set_pipe_timings(pipe_config);
6785 intel_set_pipe_src_size(pipe_config);
6786
6787 i9xx_set_pipeconf(pipe_config);
6788
6789 intel_crtc->active = true;
6790
6791 if (!IS_GEN(dev_priv, 2))
6792 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6793
6794 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6795
6796 i9xx_enable_pll(intel_crtc, pipe_config);
6797
6798 i9xx_pfit_enable(pipe_config);
6799
6800 intel_color_load_luts(pipe_config);
6801 intel_color_commit(pipe_config);
6802 /* update DSPCNTR to configure gamma for pipe bottom color */
6803 intel_disable_primary_plane(pipe_config);
6804
6805 if (dev_priv->display.initial_watermarks != NULL)
6806 dev_priv->display.initial_watermarks(old_intel_state,
6807 pipe_config);
6808 else
6809 intel_update_watermarks(intel_crtc);
6810 intel_enable_pipe(pipe_config);
6811
6812 assert_vblank_disabled(crtc);
6813 intel_crtc_vblank_on(pipe_config);
6814
6815 intel_encoders_enable(crtc, pipe_config, old_state);
6816 }
6817
6818 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6819 {
6820 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6822
6823 if (!old_crtc_state->gmch_pfit.control)
6824 return;
6825
6826 assert_pipe_disabled(dev_priv, crtc->pipe);
6827
6828 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6829 I915_READ(PFIT_CONTROL));
6830 I915_WRITE(PFIT_CONTROL, 0);
6831 }
6832
6833 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6834 struct drm_atomic_state *old_state)
6835 {
6836 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6837 struct drm_device *dev = crtc->dev;
6838 struct drm_i915_private *dev_priv = to_i915(dev);
6839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6840 int pipe = intel_crtc->pipe;
6841
6842 /*
6843 * On gen2 planes are double buffered but the pipe isn't, so we must
6844 * wait for planes to fully turn off before disabling the pipe.
6845 */
6846 if (IS_GEN(dev_priv, 2))
6847 intel_wait_for_vblank(dev_priv, pipe);
6848
6849 intel_encoders_disable(crtc, old_crtc_state, old_state);
6850
6851 drm_crtc_vblank_off(crtc);
6852 assert_vblank_disabled(crtc);
6853
6854 intel_disable_pipe(old_crtc_state);
6855
6856 i9xx_pfit_disable(old_crtc_state);
6857
6858 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6859
6860 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6861 if (IS_CHERRYVIEW(dev_priv))
6862 chv_disable_pll(dev_priv, pipe);
6863 else if (IS_VALLEYVIEW(dev_priv))
6864 vlv_disable_pll(dev_priv, pipe);
6865 else
6866 i9xx_disable_pll(old_crtc_state);
6867 }
6868
6869 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6870
6871 if (!IS_GEN(dev_priv, 2))
6872 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6873
6874 if (!dev_priv->display.initial_watermarks)
6875 intel_update_watermarks(intel_crtc);
6876
6877 /* clock the pipe down to 640x480@60 to potentially save power */
6878 if (IS_I830(dev_priv))
6879 i830_enable_pipe(dev_priv, pipe);
6880 }
6881
6882 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6883 struct drm_modeset_acquire_ctx *ctx)
6884 {
6885 struct intel_encoder *encoder;
6886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6887 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6888 struct intel_bw_state *bw_state =
6889 to_intel_bw_state(dev_priv->bw_obj.state);
6890 enum intel_display_power_domain domain;
6891 struct intel_plane *plane;
6892 u64 domains;
6893 struct drm_atomic_state *state;
6894 struct intel_crtc_state *crtc_state;
6895 int ret;
6896
6897 if (!intel_crtc->active)
6898 return;
6899
6900 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6901 const struct intel_plane_state *plane_state =
6902 to_intel_plane_state(plane->base.state);
6903
6904 if (plane_state->base.visible)
6905 intel_plane_disable_noatomic(intel_crtc, plane);
6906 }
6907
6908 state = drm_atomic_state_alloc(crtc->dev);
6909 if (!state) {
6910 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n",
6911 crtc->base.id, crtc->name);
6912 return;
6913 }
6914
6915 state->acquire_ctx = ctx;
6916
6917 /* Everything's already locked, -EDEADLK can't happen. */
6918 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6919 ret = drm_atomic_add_affected_connectors(state, crtc);
6920
6921 WARN_ON(IS_ERR(crtc_state) || ret);
6922
6923 dev_priv->display.crtc_disable(crtc_state, state);
6924
6925 drm_atomic_state_put(state);
6926
6927 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6928 crtc->base.id, crtc->name);
6929
6930 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6931 crtc->state->active = false;
6932 intel_crtc->active = false;
6933 crtc->enabled = false;
6934 crtc->state->connector_mask = 0;
6935 crtc->state->encoder_mask = 0;
6936
6937 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6938 encoder->base.crtc = NULL;
6939
6940 intel_fbc_disable(intel_crtc);
6941 intel_update_watermarks(intel_crtc);
6942 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6943
6944 domains = intel_crtc->enabled_power_domains;
6945 for_each_power_domain(domain, domains)
6946 intel_display_power_put_unchecked(dev_priv, domain);
6947 intel_crtc->enabled_power_domains = 0;
6948
6949 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6950 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6951 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6952
6953 bw_state->data_rate[intel_crtc->pipe] = 0;
6954 bw_state->num_active_planes[intel_crtc->pipe] = 0;
6955 }
6956
6957 /*
6958 * Turn all crtcs off, but do not adjust state.
6959 * This has to be paired with a call to intel_modeset_setup_hw_state.
6960 */
6961 int intel_display_suspend(struct drm_device *dev)
6962 {
6963 struct drm_i915_private *dev_priv = to_i915(dev);
6964 struct drm_atomic_state *state;
6965 int ret;
6966
6967 state = drm_atomic_helper_suspend(dev);
6968 ret = PTR_ERR_OR_ZERO(state);
6969 if (ret)
6970 DRM_ERROR("Suspending crtcs failed with %i\n", ret);
6971 else
6972 dev_priv->modeset_restore_state = state;
6973 return ret;
6974 }
6975
6976 void intel_encoder_destroy(struct drm_encoder *encoder)
6977 {
6978 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6979
6980 drm_encoder_cleanup(encoder);
6981 kfree(intel_encoder);
6982 }
6983
6984 /* Cross check the actual hw state with our own modeset state tracking (and its
6985 * internal consistency). */
6986 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6987 struct drm_connector_state *conn_state)
6988 {
6989 struct intel_connector *connector = to_intel_connector(conn_state->connector);
6990
6991 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6992 connector->base.base.id,
6993 connector->base.name);
6994
6995 if (connector->get_hw_state(connector)) {
6996 struct intel_encoder *encoder = connector->encoder;
6997
6998 I915_STATE_WARN(!crtc_state,
6999 "connector enabled without attached crtc\n");
7000
7001 if (!crtc_state)
7002 return;
7003
7004 I915_STATE_WARN(!crtc_state->active,
7005 "connector is active, but attached crtc isn't\n");
7006
7007 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7008 return;
7009
7010 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7011 "atomic encoder doesn't match attached encoder\n");
7012
7013 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7014 "attached encoder crtc differs from connector crtc\n");
7015 } else {
7016 I915_STATE_WARN(crtc_state && crtc_state->active,
7017 "attached crtc is active, but connector isn't\n");
7018 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7019 "best encoder set without crtc!\n");
7020 }
7021 }
7022
7023 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7024 {
7025 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7026 return crtc_state->fdi_lanes;
7027
7028 return 0;
7029 }
7030
7031 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7032 struct intel_crtc_state *pipe_config)
7033 {
7034 struct drm_i915_private *dev_priv = to_i915(dev);
7035 struct drm_atomic_state *state = pipe_config->base.state;
7036 struct intel_crtc *other_crtc;
7037 struct intel_crtc_state *other_crtc_state;
7038
7039 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7040 pipe_name(pipe), pipe_config->fdi_lanes);
7041 if (pipe_config->fdi_lanes > 4) {
7042 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7043 pipe_name(pipe), pipe_config->fdi_lanes);
7044 return -EINVAL;
7045 }
7046
7047 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7048 if (pipe_config->fdi_lanes > 2) {
7049 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7050 pipe_config->fdi_lanes);
7051 return -EINVAL;
7052 } else {
7053 return 0;
7054 }
7055 }
7056
7057 if (INTEL_INFO(dev_priv)->num_pipes == 2)
7058 return 0;
7059
7060 /* Ivybridge 3 pipe is really complicated */
7061 switch (pipe) {
7062 case PIPE_A:
7063 return 0;
7064 case PIPE_B:
7065 if (pipe_config->fdi_lanes <= 2)
7066 return 0;
7067
7068 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7069 other_crtc_state =
7070 intel_atomic_get_crtc_state(state, other_crtc);
7071 if (IS_ERR(other_crtc_state))
7072 return PTR_ERR(other_crtc_state);
7073
7074 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7075 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7076 pipe_name(pipe), pipe_config->fdi_lanes);
7077 return -EINVAL;
7078 }
7079 return 0;
7080 case PIPE_C:
7081 if (pipe_config->fdi_lanes > 2) {
7082 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7083 pipe_name(pipe), pipe_config->fdi_lanes);
7084 return -EINVAL;
7085 }
7086
7087 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7088 other_crtc_state =
7089 intel_atomic_get_crtc_state(state, other_crtc);
7090 if (IS_ERR(other_crtc_state))
7091 return PTR_ERR(other_crtc_state);
7092
7093 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7094 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
7095 return -EINVAL;
7096 }
7097 return 0;
7098 default:
7099 BUG();
7100 }
7101 }
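/*
 * Summary of the IVB 3-pipe FDI constraints checked above: pipe A has a
 * dedicated FDI link; pipes B and C share lanes, so e.g. pipe B running
 * 4 lanes leaves nothing for pipe C, and enabling pipe C (max 2 lanes)
 * requires pipe B to use at most 2 lanes as well.
 */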
7102
7103 #define RETRY 1
7104 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7105 struct intel_crtc_state *pipe_config)
7106 {
7107 struct drm_device *dev = intel_crtc->base.dev;
7108 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7109 int lane, link_bw, fdi_dotclock, ret;
7110 bool needs_recompute = false;
7111
7112 retry:
7113 	/* FDI is a binary signal running at ~2.7GHz, encoding
7114 	 * each output octet as 10 bits (8b/10b). The actual link
7115 	 * frequency is stored as a divider into a 100MHz clock,
7116 	 * and the mode pixel clock is stored in units of 1kHz.
7117 	 * Hence the bandwidth of each lane in terms of the mode
7118 	 * signal is computed below:
7119 	 */
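	/* Illustrative numbers only (not from bspec): a 148500 kHz dotclock
	 * at 24 bpp needs 148500 * 24 / (270000 * 8) ~= 1.65 lanes of a
	 * 2.7GHz FDI link, i.e. 2 lanes, plus whatever spread spectrum
	 * margin ironlake_get_lanes_required() folds in.
	 */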
7120 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7121
7122 fdi_dotclock = adjusted_mode->crtc_clock;
7123
7124 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7125 pipe_config->pipe_bpp);
7126
7127 pipe_config->fdi_lanes = lane;
7128
7129 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7130 link_bw, &pipe_config->fdi_m_n, false);
7131
7132 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7133 if (ret == -EDEADLK)
7134 return ret;
7135
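	/* pipe_bpp counts all three channels, so 6*3 = 18 is the 6 bpc
	 * floor and each retry below drops 2 bits per channel (2*3).
	 */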
7136 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7137 pipe_config->pipe_bpp -= 2*3;
7138 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7139 pipe_config->pipe_bpp);
7140 needs_recompute = true;
7141 pipe_config->bw_constrained = true;
7142
7143 goto retry;
7144 }
7145
7146 if (needs_recompute)
7147 return RETRY;
7148
7149 return ret;
7150 }
7151
7152 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7153 {
7154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7156
7157 /* IPS only exists on ULT machines and is tied to pipe A. */
7158 if (!hsw_crtc_supports_ips(crtc))
7159 return false;
7160
7161 if (!i915_modparams.enable_ips)
7162 return false;
7163
7164 if (crtc_state->pipe_bpp > 24)
7165 return false;
7166
7167 	/*
7168 	 * We compare against max, which means we must take the
7169 	 * increased cdclk requirement into account when
7170 	 * calculating the new cdclk.
7171 	 *
7172 	 * FIXME: measure whether a lower cdclk without IPS would be a net win.
7173 	 */
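	/* Illustrative numbers only: with max_cdclk_freq = 540000 kHz,
	 * IPS is allowed only while the pipe pixel rate stays at or
	 * below 540000 * 95 / 100 = 513000 kHz.
	 */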
7174 if (IS_BROADWELL(dev_priv) &&
7175 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7176 return false;
7177
7178 return true;
7179 }
7180
7181 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7182 {
7183 struct drm_i915_private *dev_priv =
7184 to_i915(crtc_state->base.crtc->dev);
7185 struct intel_atomic_state *intel_state =
7186 to_intel_atomic_state(crtc_state->base.state);
7187
7188 if (!hsw_crtc_state_ips_capable(crtc_state))
7189 return false;
7190
7191 /*
7192 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7193 * enabled and disabled dynamically based on package C states,
7194 * user space can't make reliable use of the CRCs, so let's just
7195 * completely disable it.
7196 */
7197 if (crtc_state->crc_enabled)
7198 return false;
7199
7200 	/* IPS should be fine as long as at least one non-cursor plane is enabled. */
7201 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7202 return false;
7203
7204 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7205 if (IS_BROADWELL(dev_priv) &&
7206 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7207 return false;
7208
7209 return true;
7210 }
7211
7212 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7213 {
7214 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7215
7216 /* GDG double wide on either pipe, otherwise pipe A only */
7217 return INTEL_GEN(dev_priv) < 4 &&
7218 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7219 }
7220
7221 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7222 {
7223 u32 pixel_rate;
7224
7225 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7226
7227 /*
7228 * We only use IF-ID interlacing. If we ever use
7229 * PF-ID we'll need to adjust the pixel_rate here.
7230 */
7231
7232 if (pipe_config->pch_pfit.enabled) {
7233 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7234 u32 pfit_size = pipe_config->pch_pfit.size;
7235
7236 pipe_w = pipe_config->pipe_src_w;
7237 pipe_h = pipe_config->pipe_src_h;
7238
7239 pfit_w = (pfit_size >> 16) & 0xFFFF;
7240 pfit_h = pfit_size & 0xFFFF;
7241 if (pipe_w < pfit_w)
7242 pipe_w = pfit_w;
7243 if (pipe_h < pfit_h)
7244 pipe_h = pfit_h;
7245
7246 if (WARN_ON(!pfit_w || !pfit_h))
7247 return pixel_rate;
7248
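		/* Downscaling through the panel fitter raises the effective pixel
		 * rate, while upscaling leaves it unchanged thanks to the clamping
		 * above. Illustrative numbers only: 1920x1200 scaled down to
		 * 1920x1080 gives pixel_rate * (1920*1200)/(1920*1080), roughly
		 * an 11% increase.
		 */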
7249 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7250 pfit_w * pfit_h);
7251 }
7252
7253 return pixel_rate;
7254 }
7255
7256 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7257 {
7258 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7259
7260 if (HAS_GMCH(dev_priv))
7261 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7262 crtc_state->pixel_rate =
7263 crtc_state->base.adjusted_mode.crtc_clock;
7264 else
7265 crtc_state->pixel_rate =
7266 ilk_pipe_pixel_rate(crtc_state);
7267 }
7268
7269 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7270 struct intel_crtc_state *pipe_config)
7271 {
7272 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7273 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7274 int clock_limit = dev_priv->max_dotclk_freq;
7275
7276 if (INTEL_GEN(dev_priv) < 4) {
7277 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7278
7279 /*
7280 * Enable double wide mode when the dot clock
7281 * is > 90% of the (display) core speed.
7282 */
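		/* Illustrative numbers only: with max_cdclk_freq = 320000 kHz the
		 * single wide limit is 288000 kHz, so e.g. a 293000 kHz dot clock
		 * flips the pipe into double wide mode where supported.
		 */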
7283 if (intel_crtc_supports_double_wide(crtc) &&
7284 adjusted_mode->crtc_clock > clock_limit) {
7285 clock_limit = dev_priv->max_dotclk_freq;
7286 pipe_config->double_wide = true;
7287 }
7288 }
7289
7290 if (adjusted_mode->crtc_clock > clock_limit) {
7291 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7292 adjusted_mode->crtc_clock, clock_limit,
7293 yesno(pipe_config->double_wide));
7294 return -EINVAL;
7295 }
7296
7297 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7298 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7299 pipe_config->base.ctm) {
7300 /*
7301 * There is only one pipe CSC unit per pipe, and we need that
7302 * for output conversion from RGB->YCBCR. So if CTM is already
7303 * applied we can't support YCBCR420 output.
7304 */
7305 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7306 return -EINVAL;
7307 }
7308
7309 /*
7310 * Pipe horizontal size must be even in:
7311 * - DVO ganged mode
7312 * - LVDS dual channel mode
7313 * - Double wide pipe
7314 */
7315 if (pipe_config->pipe_src_w & 1) {
7316 if (pipe_config->double_wide) {
7317 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7318 return -EINVAL;
7319 }
7320
7321 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7322 intel_is_dual_link_lvds(dev_priv)) {
7323 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7324 return -EINVAL;
7325 }
7326 }
7327
7328 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7329 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7330 */
7331 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7332 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7333 return -EINVAL;
7334
7335 intel_crtc_compute_pixel_rate(pipe_config);
7336
7337 if (pipe_config->has_pch_encoder)
7338 return ironlake_fdi_compute_config(crtc, pipe_config);
7339
7340 return 0;
7341 }
7342
7343 static void
7344 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7345 {
7346 while (*num > DATA_LINK_M_N_MASK ||
7347 *den > DATA_LINK_M_N_MASK) {
7348 *num >>= 1;
7349 *den >>= 1;
7350 }
7351 }
7352
7353 static void compute_m_n(unsigned int m, unsigned int n,
7354 u32 *ret_m, u32 *ret_n,
7355 bool constant_n)
7356 {
7357 	/*
7358 	 * Several DP dongles in particular seem to be fussy about
7359 	 * too large link M/N values. Use N = 0x8000, which such
7360 	 * devices should accept: 0x8000 is the specified fixed N
7361 	 * value for asynchronous clock mode, and the devices expect
7362 	 * it in synchronous clock mode as well.
7363 	 */
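	/* Illustrative numbers only: for m/n = 3564000/8640000 (24 bpp at
	 * a 148500 kHz pixel clock over 4 lanes at 270000 kHz), constant_n
	 * yields N = 0x8000 and M = 3564000 * 32768 / 8640000 = 13516.
	 */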
7364 if (constant_n)
7365 *ret_n = 0x8000;
7366 else
7367 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7368
7369 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7370 intel_reduce_m_n_ratio(ret_m, ret_n);
7371 }
7372
7373 void
7374 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7375 int pixel_clock, int link_clock,
7376 struct intel_link_m_n *m_n,
7377 bool constant_n)
7378 {
7379 m_n->tu = 64;
7380
7381 compute_m_n(bits_per_pixel * pixel_clock,
7382 link_clock * nlanes * 8,
7383 &m_n->gmch_m, &m_n->gmch_n,
7384 constant_n);
7385
7386 compute_m_n(pixel_clock, link_clock,
7387 &m_n->link_m, &m_n->link_n,
7388 constant_n);
7389 }
7390
7391 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7392 {
7393 if (i915_modparams.panel_use_ssc >= 0)
7394 return i915_modparams.panel_use_ssc != 0;
7395 return dev_priv->vbt.lvds_use_ssc
7396 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7397 }
7398
7399 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7400 {
7401 return (1 << dpll->n) << 16 | dpll->m2;
7402 }
7403
7404 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7405 {
7406 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7407 }
7408
7409 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7410 struct intel_crtc_state *crtc_state,
7411 struct dpll *reduced_clock)
7412 {
7413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7414 u32 fp, fp2 = 0;
7415
7416 if (IS_PINEVIEW(dev_priv)) {
7417 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7418 if (reduced_clock)
7419 fp2 = pnv_dpll_compute_fp(reduced_clock);
7420 } else {
7421 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7422 if (reduced_clock)
7423 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7424 }
7425
7426 crtc_state->dpll_hw_state.fp0 = fp;
7427
7428 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7429 reduced_clock) {
7430 crtc_state->dpll_hw_state.fp1 = fp2;
7431 } else {
7432 crtc_state->dpll_hw_state.fp1 = fp;
7433 }
7434 }
7435
7436 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7437 pipe)
7438 {
7439 u32 reg_val;
7440
7441 /*
7442 	 * The PLLB opamp always calibrates to the max value of 0x3f; force
7443 	 * enable it and set it to a reasonable value instead.
7444 */
7445 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7446 reg_val &= 0xffffff00;
7447 reg_val |= 0x00000030;
7448 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7449
7450 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7451 reg_val &= 0x00ffffff;
7452 reg_val |= 0x8c000000;
7453 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7454
7455 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7456 reg_val &= 0xffffff00;
7457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7458
7459 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7460 reg_val &= 0x00ffffff;
7461 reg_val |= 0xb0000000;
7462 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7463 }
7464
7465 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7466 const struct intel_link_m_n *m_n)
7467 {
7468 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7469 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7470 enum pipe pipe = crtc->pipe;
7471
7472 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7473 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7474 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7475 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7476 }
7477
7478 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7479 enum transcoder transcoder)
7480 {
7481 if (IS_HASWELL(dev_priv))
7482 return transcoder == TRANSCODER_EDP;
7483
7484 /*
7485 * Strictly speaking some registers are available before
7486 * gen7, but we only support DRRS on gen7+
7487 */
7488 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7489 }
7490
7491 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7492 const struct intel_link_m_n *m_n,
7493 const struct intel_link_m_n *m2_n2)
7494 {
7495 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7496 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7497 enum pipe pipe = crtc->pipe;
7498 enum transcoder transcoder = crtc_state->cpu_transcoder;
7499
7500 if (INTEL_GEN(dev_priv) >= 5) {
7501 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7502 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7503 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7504 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7505 /*
7506 * M2_N2 registers are set only if DRRS is supported
7507 * (to make sure the registers are not unnecessarily accessed).
7508 */
7509 if (m2_n2 && crtc_state->has_drrs &&
7510 transcoder_has_m2_n2(dev_priv, transcoder)) {
7511 I915_WRITE(PIPE_DATA_M2(transcoder),
7512 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7513 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7514 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7515 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7516 }
7517 } else {
7518 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7519 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7520 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7521 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7522 }
7523 }
7524
7525 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7526 {
7527 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7528
7529 if (m_n == M1_N1) {
7530 dp_m_n = &crtc_state->dp_m_n;
7531 dp_m2_n2 = &crtc_state->dp_m2_n2;
7532 } else if (m_n == M2_N2) {
7533
7534 /*
7535 		 * M2_N2 registers are not supported here, so the m2_n2 divider
7536 		 * values need to be programmed into M1_N1 instead.
7537 */
7538 dp_m_n = &crtc_state->dp_m2_n2;
7539 } else {
7540 DRM_ERROR("Unsupported divider value\n");
7541 return;
7542 }
7543
7544 if (crtc_state->has_pch_encoder)
7545 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7546 else
7547 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7548 }
7549
7550 static void vlv_compute_dpll(struct intel_crtc *crtc,
7551 struct intel_crtc_state *pipe_config)
7552 {
7553 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7554 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7555 if (crtc->pipe != PIPE_A)
7556 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7557
7558 /* DPLL not used with DSI, but still need the rest set up */
7559 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7560 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7561 DPLL_EXT_BUFFER_ENABLE_VLV;
7562
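	/* The MD register stores the UDI multiplier minus one, so e.g. a
	 * pixel multiplier of 2 is programmed as 1.
	 */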
7563 pipe_config->dpll_hw_state.dpll_md =
7564 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7565 }
7566
7567 static void chv_compute_dpll(struct intel_crtc *crtc,
7568 struct intel_crtc_state *pipe_config)
7569 {
7570 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7571 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7572 if (crtc->pipe != PIPE_A)
7573 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7574
7575 /* DPLL not used with DSI, but still need the rest set up */
7576 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7577 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7578
7579 pipe_config->dpll_hw_state.dpll_md =
7580 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7581 }
7582
7583 static void vlv_prepare_pll(struct intel_crtc *crtc,
7584 const struct intel_crtc_state *pipe_config)
7585 {
7586 struct drm_device *dev = crtc->base.dev;
7587 struct drm_i915_private *dev_priv = to_i915(dev);
7588 enum pipe pipe = crtc->pipe;
7589 u32 mdiv;
7590 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7591 u32 coreclk, reg_val;
7592
7593 /* Enable Refclk */
7594 I915_WRITE(DPLL(pipe),
7595 pipe_config->dpll_hw_state.dpll &
7596 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7597
7598 /* No need to actually set up the DPLL with DSI */
7599 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7600 return;
7601
7602 vlv_dpio_get(dev_priv);
7603
7604 bestn = pipe_config->dpll.n;
7605 bestm1 = pipe_config->dpll.m1;
7606 bestm2 = pipe_config->dpll.m2;
7607 bestp1 = pipe_config->dpll.p1;
7608 bestp2 = pipe_config->dpll.p2;
7609
7610 /* See eDP HDMI DPIO driver vbios notes doc */
7611
7612 /* PLL B needs special handling */
7613 if (pipe == PIPE_B)
7614 vlv_pllb_recal_opamp(dev_priv, pipe);
7615
7616 /* Set up Tx target for periodic Rcomp update */
7617 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7618
7619 /* Disable target IRef on PLL */
7620 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7621 reg_val &= 0x00ffffff;
7622 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7623
7624 /* Disable fast lock */
7625 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7626
7627 /* Set idtafcrecal before PLL is enabled */
7628 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7629 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7630 mdiv |= ((bestn << DPIO_N_SHIFT));
7631 mdiv |= (1 << DPIO_K_SHIFT);
7632
7633 /*
7634 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7635 * but we don't support that).
7636 * Note: don't use the DAC post divider as it seems unstable.
7637 */
7638 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7639 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7640
7641 mdiv |= DPIO_ENABLE_CALIBRATION;
7642 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7643
7644 /* Set HBR and RBR LPF coefficients */
7645 if (pipe_config->port_clock == 162000 ||
7646 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7647 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7648 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7649 0x009f0003);
7650 else
7651 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7652 0x00d0000f);
7653
7654 if (intel_crtc_has_dp_encoder(pipe_config)) {
7655 /* Use SSC source */
7656 if (pipe == PIPE_A)
7657 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7658 0x0df40000);
7659 else
7660 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7661 0x0df70000);
7662 } else { /* HDMI or VGA */
7663 /* Use bend source */
7664 if (pipe == PIPE_A)
7665 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7666 0x0df70000);
7667 else
7668 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7669 0x0df40000);
7670 }
7671
7672 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7673 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7674 if (intel_crtc_has_dp_encoder(pipe_config))
7675 coreclk |= 0x01000000;
7676 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7677
7678 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7679
7680 vlv_dpio_put(dev_priv);
7681 }
7682
7683 static void chv_prepare_pll(struct intel_crtc *crtc,
7684 const struct intel_crtc_state *pipe_config)
7685 {
7686 struct drm_device *dev = crtc->base.dev;
7687 struct drm_i915_private *dev_priv = to_i915(dev);
7688 enum pipe pipe = crtc->pipe;
7689 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7690 u32 loopfilter, tribuf_calcntr;
7691 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7692 u32 dpio_val;
7693 int vco;
7694
7695 /* Enable Refclk and SSC */
7696 I915_WRITE(DPLL(pipe),
7697 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7698
7699 /* No need to actually set up the DPLL with DSI */
7700 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7701 return;
7702
7703 bestn = pipe_config->dpll.n;
7704 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7705 bestm1 = pipe_config->dpll.m1;
7706 bestm2 = pipe_config->dpll.m2 >> 22;
7707 bestp1 = pipe_config->dpll.p1;
7708 bestp2 = pipe_config->dpll.p2;
7709 vco = pipe_config->dpll.vco;
7710 dpio_val = 0;
7711 loopfilter = 0;
7712
7713 vlv_dpio_get(dev_priv);
7714
7715 /* p1 and p2 divider */
7716 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7717 5 << DPIO_CHV_S1_DIV_SHIFT |
7718 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7719 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7720 1 << DPIO_CHV_K_DIV_SHIFT);
7721
7722 /* Feedback post-divider - m2 */
7723 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7724
7725 /* Feedback refclk divider - n and m1 */
7726 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7727 DPIO_CHV_M1_DIV_BY_2 |
7728 1 << DPIO_CHV_N_DIV_SHIFT);
7729
7730 /* M2 fraction division */
7731 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7732
7733 /* M2 fraction division enable */
7734 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7735 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7736 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7737 if (bestm2_frac)
7738 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7739 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7740
7741 /* Program digital lock detect threshold */
7742 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7743 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7744 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7745 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7746 if (!bestm2_frac)
7747 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7748 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7749
7750 /* Loop filter */
7751 if (vco == 5400000) {
7752 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7753 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7754 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7755 tribuf_calcntr = 0x9;
7756 } else if (vco <= 6200000) {
7757 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7758 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7759 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7760 tribuf_calcntr = 0x9;
7761 } else if (vco <= 6480000) {
7762 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7763 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7764 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7765 tribuf_calcntr = 0x8;
7766 } else {
7767 /* Not supported. Apply the same limits as in the max case */
7768 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7769 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7770 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7771 tribuf_calcntr = 0;
7772 }
7773 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7774
7775 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7776 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7777 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7778 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7779
7780 /* AFC Recal */
7781 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7782 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7783 DPIO_AFC_RECAL);
7784
7785 vlv_dpio_put(dev_priv);
7786 }
7787
7788 /**
7789 * vlv_force_pll_on - forcibly enable just the PLL
7790 * @dev_priv: i915 private structure
7791 * @pipe: pipe PLL to enable
7792 * @dpll: PLL configuration
7793 *
7794 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7795 * in cases where we need the PLL enabled even when @pipe is not going to
7796 * be enabled.
7797 */
7798 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7799 const struct dpll *dpll)
7800 {
7801 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7802 struct intel_crtc_state *pipe_config;
7803
7804 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7805 if (!pipe_config)
7806 return -ENOMEM;
7807
7808 pipe_config->base.crtc = &crtc->base;
7809 pipe_config->pixel_multiplier = 1;
7810 pipe_config->dpll = *dpll;
7811
7812 if (IS_CHERRYVIEW(dev_priv)) {
7813 chv_compute_dpll(crtc, pipe_config);
7814 chv_prepare_pll(crtc, pipe_config);
7815 chv_enable_pll(crtc, pipe_config);
7816 } else {
7817 vlv_compute_dpll(crtc, pipe_config);
7818 vlv_prepare_pll(crtc, pipe_config);
7819 vlv_enable_pll(crtc, pipe_config);
7820 }
7821
7822 kfree(pipe_config);
7823
7824 return 0;
7825 }
7826
7827 /**
7828 * vlv_force_pll_off - forcibly disable just the PLL
7829 * @dev_priv: i915 private structure
7830 * @pipe: pipe PLL to disable
7831 *
7832 * Disable the PLL for @pipe. To be used to undo a previous
7833 * vlv_force_pll_on() call when @pipe itself was never enabled.
7834 */
7835 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7836 {
7837 if (IS_CHERRYVIEW(dev_priv))
7838 chv_disable_pll(dev_priv, pipe);
7839 else
7840 vlv_disable_pll(dev_priv, pipe);
7841 }
7842
7843 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7844 struct intel_crtc_state *crtc_state,
7845 struct dpll *reduced_clock)
7846 {
7847 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7848 u32 dpll;
7849 struct dpll *clock = &crtc_state->dpll;
7850
7851 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7852
7853 dpll = DPLL_VGA_MODE_DIS;
7854
7855 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7856 dpll |= DPLLB_MODE_LVDS;
7857 else
7858 dpll |= DPLLB_MODE_DAC_SERIAL;
7859
7860 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7861 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7862 dpll |= (crtc_state->pixel_multiplier - 1)
7863 << SDVO_MULTIPLIER_SHIFT_HIRES;
7864 }
7865
7866 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7867 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7868 dpll |= DPLL_SDVO_HIGH_SPEED;
7869
7870 if (intel_crtc_has_dp_encoder(crtc_state))
7871 dpll |= DPLL_SDVO_HIGH_SPEED;
7872
7873 /* compute bitmask from p1 value */
7874 	if (IS_PINEVIEW(dev_priv)) {
7875 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7876 	} else {
7877 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7878 		if (IS_G4X(dev_priv) && reduced_clock)
7879 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7880 	}
7881 switch (clock->p2) {
7882 case 5:
7883 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7884 break;
7885 case 7:
7886 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7887 break;
7888 case 10:
7889 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7890 break;
7891 case 14:
7892 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7893 break;
7894 }
7895 if (INTEL_GEN(dev_priv) >= 4)
7896 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7897
7898 if (crtc_state->sdvo_tv_clock)
7899 dpll |= PLL_REF_INPUT_TVCLKINBC;
7900 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7901 intel_panel_use_ssc(dev_priv))
7902 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7903 else
7904 dpll |= PLL_REF_INPUT_DREFCLK;
7905
7906 dpll |= DPLL_VCO_ENABLE;
7907 crtc_state->dpll_hw_state.dpll = dpll;
7908
7909 if (INTEL_GEN(dev_priv) >= 4) {
7910 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7911 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7912 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7913 }
7914 }
7915
7916 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7917 struct intel_crtc_state *crtc_state,
7918 struct dpll *reduced_clock)
7919 {
7920 struct drm_device *dev = crtc->base.dev;
7921 struct drm_i915_private *dev_priv = to_i915(dev);
7922 u32 dpll;
7923 struct dpll *clock = &crtc_state->dpll;
7924
7925 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7926
7927 dpll = DPLL_VGA_MODE_DIS;
7928
7929 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7930 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7931 } else {
7932 if (clock->p1 == 2)
7933 dpll |= PLL_P1_DIVIDE_BY_TWO;
7934 else
7935 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7936 if (clock->p2 == 4)
7937 dpll |= PLL_P2_DIVIDE_BY_4;
7938 }
7939
7940 /*
7941 * Bspec:
7942 * "[Almador Errata}: For the correct operation of the muxed DVO pins
7943 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7944 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7945 * Enable) must be set to “1” in both the DPLL A Control Register
7946 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7947 *
7948 	 * For simplicity we simply keep both bits always enabled in
7949 	 * both DPLLs. The spec says we should disable the DVO 2X clock
7950 * when not needed, but this seems to work fine in practice.
7951 */
7952 if (IS_I830(dev_priv) ||
7953 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7954 dpll |= DPLL_DVO_2X_MODE;
7955
7956 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7957 intel_panel_use_ssc(dev_priv))
7958 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7959 else
7960 dpll |= PLL_REF_INPUT_DREFCLK;
7961
7962 dpll |= DPLL_VCO_ENABLE;
7963 crtc_state->dpll_hw_state.dpll = dpll;
7964 }
7965
7966 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7967 {
7968 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7970 enum pipe pipe = crtc->pipe;
7971 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7972 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7973 u32 crtc_vtotal, crtc_vblank_end;
7974 int vsyncshift = 0;
7975
7976 	/* We need to be careful not to change the adjusted mode, as otherwise
7977 	 * the hw state checker will get angry at the mismatch. */
7978 crtc_vtotal = adjusted_mode->crtc_vtotal;
7979 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7980
7981 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7982 /* the chip adds 2 halflines automatically */
7983 crtc_vtotal -= 1;
7984 crtc_vblank_end -= 1;
7985
7986 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7987 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7988 else
7989 vsyncshift = adjusted_mode->crtc_hsync_start -
7990 adjusted_mode->crtc_htotal / 2;
7991 if (vsyncshift < 0)
7992 vsyncshift += adjusted_mode->crtc_htotal;
7993 }
7994
7995 if (INTEL_GEN(dev_priv) > 3)
7996 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7997
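	/* Each timing register packs two zero-based values: the
	 * active/start count in bits 15:0 and the total/end count in bits
	 * 31:16. Illustrative numbers only: 1920 hdisplay with 2200 htotal
	 * writes HTOTAL = (2199 << 16) | 1919.
	 */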
7998 I915_WRITE(HTOTAL(cpu_transcoder),
7999 (adjusted_mode->crtc_hdisplay - 1) |
8000 ((adjusted_mode->crtc_htotal - 1) << 16));
8001 I915_WRITE(HBLANK(cpu_transcoder),
8002 (adjusted_mode->crtc_hblank_start - 1) |
8003 ((adjusted_mode->crtc_hblank_end - 1) << 16));
8004 I915_WRITE(HSYNC(cpu_transcoder),
8005 (adjusted_mode->crtc_hsync_start - 1) |
8006 ((adjusted_mode->crtc_hsync_end - 1) << 16));
8007
8008 I915_WRITE(VTOTAL(cpu_transcoder),
8009 (adjusted_mode->crtc_vdisplay - 1) |
8010 ((crtc_vtotal - 1) << 16));
8011 I915_WRITE(VBLANK(cpu_transcoder),
8012 (adjusted_mode->crtc_vblank_start - 1) |
8013 ((crtc_vblank_end - 1) << 16));
8014 I915_WRITE(VSYNC(cpu_transcoder),
8015 (adjusted_mode->crtc_vsync_start - 1) |
8016 ((adjusted_mode->crtc_vsync_end - 1) << 16));
8017
8018 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8019 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8020 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8021 * bits. */
8022 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8023 (pipe == PIPE_B || pipe == PIPE_C))
8024 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
8026 }
8027
8028 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8029 {
8030 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8031 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8032 enum pipe pipe = crtc->pipe;
8033
8034 /* pipesrc controls the size that is scaled from, which should
8035 * always be the user's requested size.
8036 */
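	/* Note the packing is reversed vs. the timing registers above:
	 * width - 1 goes in bits 31:16 and height - 1 in bits 15:0.
	 */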
8037 I915_WRITE(PIPESRC(pipe),
8038 ((crtc_state->pipe_src_w - 1) << 16) |
8039 (crtc_state->pipe_src_h - 1));
8040 }
8041
8042 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8043 struct intel_crtc_state *pipe_config)
8044 {
8045 struct drm_device *dev = crtc->base.dev;
8046 struct drm_i915_private *dev_priv = to_i915(dev);
8047 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8048 u32 tmp;
8049
8050 tmp = I915_READ(HTOTAL(cpu_transcoder));
8051 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8052 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8053
8054 if (!transcoder_is_dsi(cpu_transcoder)) {
8055 tmp = I915_READ(HBLANK(cpu_transcoder));
8056 pipe_config->base.adjusted_mode.crtc_hblank_start =
8057 (tmp & 0xffff) + 1;
8058 pipe_config->base.adjusted_mode.crtc_hblank_end =
8059 ((tmp >> 16) & 0xffff) + 1;
8060 }
8061 tmp = I915_READ(HSYNC(cpu_transcoder));
8062 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8063 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8064
8065 tmp = I915_READ(VTOTAL(cpu_transcoder));
8066 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8067 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8068
8069 if (!transcoder_is_dsi(cpu_transcoder)) {
8070 tmp = I915_READ(VBLANK(cpu_transcoder));
8071 pipe_config->base.adjusted_mode.crtc_vblank_start =
8072 (tmp & 0xffff) + 1;
8073 pipe_config->base.adjusted_mode.crtc_vblank_end =
8074 ((tmp >> 16) & 0xffff) + 1;
8075 }
8076 tmp = I915_READ(VSYNC(cpu_transcoder));
8077 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8078 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8079
8080 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
8081 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8082 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8083 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8084 }
8085 }
8086
8087 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8088 struct intel_crtc_state *pipe_config)
8089 {
8090 struct drm_device *dev = crtc->base.dev;
8091 struct drm_i915_private *dev_priv = to_i915(dev);
8092 u32 tmp;
8093
8094 tmp = I915_READ(PIPESRC(crtc->pipe));
8095 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8096 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8097
8098 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8099 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8100 }
8101
8102 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8103 struct intel_crtc_state *pipe_config)
8104 {
8105 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8106 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8107 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8108 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8109
8110 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8111 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8112 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8113 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8114
8115 mode->flags = pipe_config->base.adjusted_mode.flags;
8116 mode->type = DRM_MODE_TYPE_DRIVER;
8117
8118 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8119
8120 mode->hsync = drm_mode_hsync(mode);
8121 mode->vrefresh = drm_mode_vrefresh(mode);
8122 drm_mode_set_name(mode);
8123 }
8124
8125 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8126 {
8127 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8128 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8129 u32 pipeconf;
8130
8131 pipeconf = 0;
8132
8133 /* we keep both pipes enabled on 830 */
8134 if (IS_I830(dev_priv))
8135 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8136
8137 if (crtc_state->double_wide)
8138 pipeconf |= PIPECONF_DOUBLE_WIDE;
8139
8140 /* only g4x and later have fancy bpc/dither controls */
8141 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8142 IS_CHERRYVIEW(dev_priv)) {
8143 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8144 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8145 pipeconf |= PIPECONF_DITHER_EN |
8146 PIPECONF_DITHER_TYPE_SP;
8147
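		/* pipe_bpp covers all three channels, so 18/24/30 bpp map to
		 * 6/8/10 bits per channel below.
		 */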
8148 switch (crtc_state->pipe_bpp) {
8149 case 18:
8150 pipeconf |= PIPECONF_6BPC;
8151 break;
8152 case 24:
8153 pipeconf |= PIPECONF_8BPC;
8154 break;
8155 case 30:
8156 pipeconf |= PIPECONF_10BPC;
8157 break;
8158 default:
8159 /* Case prevented by intel_choose_pipe_bpp_dither. */
8160 BUG();
8161 }
8162 }
8163
8164 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8165 if (INTEL_GEN(dev_priv) < 4 ||
8166 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8167 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8168 else
8169 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8170 } else {
8171 pipeconf |= PIPECONF_PROGRESSIVE;
8172 }
8173
8174 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8175 crtc_state->limited_color_range)
8176 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8177
8178 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8179
8180 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8181 POSTING_READ(PIPECONF(crtc->pipe));
8182 }
8183
8184 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8185 struct intel_crtc_state *crtc_state)
8186 {
8187 struct drm_device *dev = crtc->base.dev;
8188 struct drm_i915_private *dev_priv = to_i915(dev);
8189 const struct intel_limit *limit;
8190 int refclk = 48000;
8191
8192 memset(&crtc_state->dpll_hw_state, 0,
8193 sizeof(crtc_state->dpll_hw_state));
8194
8195 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8196 if (intel_panel_use_ssc(dev_priv)) {
8197 refclk = dev_priv->vbt.lvds_ssc_freq;
8198 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8199 }
8200
8201 limit = &intel_limits_i8xx_lvds;
8202 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8203 limit = &intel_limits_i8xx_dvo;
8204 } else {
8205 limit = &intel_limits_i8xx_dac;
8206 }
8207
8208 if (!crtc_state->clock_set &&
8209 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8210 refclk, NULL, &crtc_state->dpll)) {
8211 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8212 return -EINVAL;
8213 }
8214
8215 i8xx_compute_dpll(crtc, crtc_state, NULL);
8216
8217 return 0;
8218 }
8219
8220 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8221 struct intel_crtc_state *crtc_state)
8222 {
8223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8224 const struct intel_limit *limit;
8225 int refclk = 96000;
8226
8227 memset(&crtc_state->dpll_hw_state, 0,
8228 sizeof(crtc_state->dpll_hw_state));
8229
8230 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8231 if (intel_panel_use_ssc(dev_priv)) {
8232 refclk = dev_priv->vbt.lvds_ssc_freq;
8233 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8234 }
8235
8236 if (intel_is_dual_link_lvds(dev_priv))
8237 limit = &intel_limits_g4x_dual_channel_lvds;
8238 else
8239 limit = &intel_limits_g4x_single_channel_lvds;
8240 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8241 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8242 limit = &intel_limits_g4x_hdmi;
8243 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8244 limit = &intel_limits_g4x_sdvo;
8245 } else {
8246 		/* Fall back to the i9xx SDVO limits for any other output type */
8247 limit = &intel_limits_i9xx_sdvo;
8248 }
8249
8250 if (!crtc_state->clock_set &&
8251 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8252 refclk, NULL, &crtc_state->dpll)) {
8253 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8254 return -EINVAL;
8255 }
8256
8257 i9xx_compute_dpll(crtc, crtc_state, NULL);
8258
8259 return 0;
8260 }
8261
8262 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8263 struct intel_crtc_state *crtc_state)
8264 {
8265 struct drm_device *dev = crtc->base.dev;
8266 struct drm_i915_private *dev_priv = to_i915(dev);
8267 const struct intel_limit *limit;
8268 int refclk = 96000;
8269
8270 memset(&crtc_state->dpll_hw_state, 0,
8271 sizeof(crtc_state->dpll_hw_state));
8272
8273 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8274 if (intel_panel_use_ssc(dev_priv)) {
8275 refclk = dev_priv->vbt.lvds_ssc_freq;
8276 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8277 }
8278
8279 limit = &intel_limits_pineview_lvds;
8280 } else {
8281 limit = &intel_limits_pineview_sdvo;
8282 }
8283
8284 if (!crtc_state->clock_set &&
8285 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8286 refclk, NULL, &crtc_state->dpll)) {
8287 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8288 return -EINVAL;
8289 }
8290
8291 i9xx_compute_dpll(crtc, crtc_state, NULL);
8292
8293 return 0;
8294 }
8295
8296 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8297 struct intel_crtc_state *crtc_state)
8298 {
8299 struct drm_device *dev = crtc->base.dev;
8300 struct drm_i915_private *dev_priv = to_i915(dev);
8301 const struct intel_limit *limit;
8302 int refclk = 96000;
8303
8304 memset(&crtc_state->dpll_hw_state, 0,
8305 sizeof(crtc_state->dpll_hw_state));
8306
8307 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8308 if (intel_panel_use_ssc(dev_priv)) {
8309 refclk = dev_priv->vbt.lvds_ssc_freq;
8310 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8311 }
8312
8313 limit = &intel_limits_i9xx_lvds;
8314 } else {
8315 limit = &intel_limits_i9xx_sdvo;
8316 }
8317
8318 if (!crtc_state->clock_set &&
8319 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8320 refclk, NULL, &crtc_state->dpll)) {
8321 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8322 return -EINVAL;
8323 }
8324
8325 i9xx_compute_dpll(crtc, crtc_state, NULL);
8326
8327 return 0;
8328 }
8329
8330 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8331 struct intel_crtc_state *crtc_state)
8332 {
8333 int refclk = 100000;
8334 const struct intel_limit *limit = &intel_limits_chv;
8335
8336 memset(&crtc_state->dpll_hw_state, 0,
8337 sizeof(crtc_state->dpll_hw_state));
8338
8339 if (!crtc_state->clock_set &&
8340 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8341 refclk, NULL, &crtc_state->dpll)) {
8342 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8343 return -EINVAL;
8344 }
8345
8346 chv_compute_dpll(crtc, crtc_state);
8347
8348 return 0;
8349 }
8350
8351 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8352 struct intel_crtc_state *crtc_state)
8353 {
8354 int refclk = 100000;
8355 const struct intel_limit *limit = &intel_limits_vlv;
8356
8357 memset(&crtc_state->dpll_hw_state, 0,
8358 sizeof(crtc_state->dpll_hw_state));
8359
8360 if (!crtc_state->clock_set &&
8361 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8362 refclk, NULL, &crtc_state->dpll)) {
8363 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8364 return -EINVAL;
8365 }
8366
8367 vlv_compute_dpll(crtc, crtc_state);
8368
8369 return 0;
8370 }
8371
8372 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8373 {
8374 if (IS_I830(dev_priv))
8375 return false;
8376
8377 return INTEL_GEN(dev_priv) >= 4 ||
8378 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8379 }
8380
8381 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8382 struct intel_crtc_state *pipe_config)
8383 {
8384 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8385 u32 tmp;
8386
8387 if (!i9xx_has_pfit(dev_priv))
8388 return;
8389
8390 tmp = I915_READ(PFIT_CONTROL);
8391 if (!(tmp & PFIT_ENABLE))
8392 return;
8393
8394 /* Check whether the pfit is attached to our pipe. */
8395 if (INTEL_GEN(dev_priv) < 4) {
8396 if (crtc->pipe != PIPE_B)
8397 return;
8398 } else {
8399 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8400 return;
8401 }
8402
8403 pipe_config->gmch_pfit.control = tmp;
8404 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8405 }
8406
8407 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8408 struct intel_crtc_state *pipe_config)
8409 {
8410 struct drm_device *dev = crtc->base.dev;
8411 struct drm_i915_private *dev_priv = to_i915(dev);
8412 int pipe = pipe_config->cpu_transcoder;
8413 struct dpll clock;
8414 u32 mdiv;
8415 int refclk = 100000;
8416
8417 /* In case of DSI, DPLL will not be used */
8418 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8419 return;
8420
8421 vlv_dpio_get(dev_priv);
8422 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8423 vlv_dpio_put(dev_priv);
8424
8425 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8426 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8427 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8428 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8429 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8430
8431 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8432 }
8433
8434 static void
8435 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8436 struct intel_initial_plane_config *plane_config)
8437 {
8438 struct drm_device *dev = crtc->base.dev;
8439 struct drm_i915_private *dev_priv = to_i915(dev);
8440 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8441 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8442 enum pipe pipe;
8443 u32 val, base, offset;
8444 int fourcc, pixel_format;
8445 unsigned int aligned_height;
8446 struct drm_framebuffer *fb;
8447 struct intel_framebuffer *intel_fb;
8448
8449 if (!plane->get_hw_state(plane, &pipe))
8450 return;
8451
8452 WARN_ON(pipe != crtc->pipe);
8453
8454 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8455 if (!intel_fb) {
8456 DRM_DEBUG_KMS("failed to alloc fb\n");
8457 return;
8458 }
8459
8460 fb = &intel_fb->base;
8461
8462 fb->dev = dev;
8463
8464 val = I915_READ(DSPCNTR(i9xx_plane));
8465
8466 if (INTEL_GEN(dev_priv) >= 4) {
8467 if (val & DISPPLANE_TILED) {
8468 plane_config->tiling = I915_TILING_X;
8469 fb->modifier = I915_FORMAT_MOD_X_TILED;
8470 }
8471
8472 if (val & DISPPLANE_ROTATE_180)
8473 plane_config->rotation = DRM_MODE_ROTATE_180;
8474 }
8475
8476 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8477 val & DISPPLANE_MIRROR)
8478 plane_config->rotation |= DRM_MODE_REFLECT_X;
8479
8480 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8481 fourcc = i9xx_format_to_fourcc(pixel_format);
8482 fb->format = drm_format_info(fourcc);
8483
8484 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8485 offset = I915_READ(DSPOFFSET(i9xx_plane));
8486 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8487 } else if (INTEL_GEN(dev_priv) >= 4) {
8488 if (plane_config->tiling)
8489 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8490 else
8491 offset = I915_READ(DSPLINOFF(i9xx_plane));
8492 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8493 } else {
8494 base = I915_READ(DSPADDR(i9xx_plane));
8495 }
8496 plane_config->base = base;
8497
8498 val = I915_READ(PIPESRC(pipe));
8499 fb->width = ((val >> 16) & 0xfff) + 1;
8500 fb->height = ((val >> 0) & 0xfff) + 1;
8501
8502 val = I915_READ(DSPSTRIDE(i9xx_plane));
8503 fb->pitches[0] = val & 0xffffffc0;
8504
8505 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8506
8507 plane_config->size = fb->pitches[0] * aligned_height;
8508
8509 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8510 crtc->base.name, plane->base.name, fb->width, fb->height,
8511 fb->format->cpp[0] * 8, base, fb->pitches[0],
8512 plane_config->size);
8513
8514 plane_config->fb = intel_fb;
8515 }
8516
8517 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8518 struct intel_crtc_state *pipe_config)
8519 {
8520 struct drm_device *dev = crtc->base.dev;
8521 struct drm_i915_private *dev_priv = to_i915(dev);
8522 int pipe = pipe_config->cpu_transcoder;
8523 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8524 struct dpll clock;
8525 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8526 int refclk = 100000;
8527
8528 /* In case of DSI, DPLL will not be used */
8529 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8530 return;
8531
8532 vlv_dpio_get(dev_priv);
8533 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8534 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8535 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8536 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8537 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8538 vlv_dpio_put(dev_priv);
8539
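	/* m2 is reassembled as an 8.22 fixed point value: the integer part
	 * comes from PLL_DW0 and the 22-bit fraction from PLL_DW2 when
	 * fractional division is enabled.
	 */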
8540 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8541 clock.m2 = (pll_dw0 & 0xff) << 22;
8542 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8543 clock.m2 |= pll_dw2 & 0x3fffff;
8544 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8545 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8546 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8547
8548 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8549 }
8550
8551 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8552 struct intel_crtc_state *pipe_config)
8553 {
8554 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8555 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8556
8557 pipe_config->lspcon_downsampling = false;
8558
8559 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8560 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8561
8562 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8563 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8564 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8565
8566 if (ycbcr420_enabled) {
8567 /* We support 4:2:0 in full blend mode only */
8568 if (!blend)
8569 output = INTEL_OUTPUT_FORMAT_INVALID;
8570 else if (!(IS_GEMINILAKE(dev_priv) ||
8571 INTEL_GEN(dev_priv) >= 10))
8572 output = INTEL_OUTPUT_FORMAT_INVALID;
8573 else
8574 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8575 } else {
8576 /*
8577 * Currently there is no interface defined to
8578 * check user preference between RGB/YCBCR444
8579 * or YCBCR420. So the only possible case for
8580 * YCBCR444 usage is driving YCBCR420 output
8581 * with LSPCON, when pipe is configured for
8582 * YCBCR444 output and LSPCON takes care of
8583 * downsampling it.
8584 */
8585 pipe_config->lspcon_downsampling = true;
8586 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8587 }
8588 }
8589 }
8590
8591 pipe_config->output_format = output;
8592 }
8593
8594 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8595 {
8596 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8597 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8598 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8599 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8600 u32 tmp;
8601
8602 tmp = I915_READ(DSPCNTR(i9xx_plane));
8603
8604 if (tmp & DISPPLANE_GAMMA_ENABLE)
8605 crtc_state->gamma_enable = true;
8606
8607 if (!HAS_GMCH(dev_priv) &&
8608 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8609 crtc_state->csc_enable = true;
8610 }
8611
8612 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8613 struct intel_crtc_state *pipe_config)
8614 {
8615 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8616 enum intel_display_power_domain power_domain;
8617 intel_wakeref_t wakeref;
8618 u32 tmp;
8619 bool ret;
8620
8621 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8622 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8623 if (!wakeref)
8624 return false;
8625
8626 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8627 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8628 pipe_config->shared_dpll = NULL;
8629
8630 ret = false;
8631
8632 tmp = I915_READ(PIPECONF(crtc->pipe));
8633 if (!(tmp & PIPECONF_ENABLE))
8634 goto out;
8635
8636 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8637 IS_CHERRYVIEW(dev_priv)) {
8638 switch (tmp & PIPECONF_BPC_MASK) {
8639 case PIPECONF_6BPC:
8640 pipe_config->pipe_bpp = 18;
8641 break;
8642 case PIPECONF_8BPC:
8643 pipe_config->pipe_bpp = 24;
8644 break;
8645 case PIPECONF_10BPC:
8646 pipe_config->pipe_bpp = 30;
8647 break;
8648 default:
8649 break;
8650 }
8651 }
8652
8653 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8654 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8655 pipe_config->limited_color_range = true;
8656
8657 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8658 PIPECONF_GAMMA_MODE_SHIFT;
8659
8660 if (IS_CHERRYVIEW(dev_priv))
8661 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8662
8663 i9xx_get_pipe_color_config(pipe_config);
8664
8665 if (INTEL_GEN(dev_priv) < 4)
8666 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8667
8668 intel_get_pipe_timings(crtc, pipe_config);
8669 intel_get_pipe_src_size(crtc, pipe_config);
8670
8671 i9xx_get_pfit_config(crtc, pipe_config);
8672
8673 if (INTEL_GEN(dev_priv) >= 4) {
8674 /* No way to read it out on pipes B and C */
8675 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8676 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8677 else
8678 tmp = I915_READ(DPLL_MD(crtc->pipe));
8679 pipe_config->pixel_multiplier =
8680 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8681 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8682 pipe_config->dpll_hw_state.dpll_md = tmp;
8683 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8684 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8685 tmp = I915_READ(DPLL(crtc->pipe));
8686 pipe_config->pixel_multiplier =
8687 ((tmp & SDVO_MULTIPLIER_MASK)
8688 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8689 } else {
8690 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8691 * port and will be fixed up in the encoder->get_config
8692 * function. */
8693 pipe_config->pixel_multiplier = 1;
8694 }
8695 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8696 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8697 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8698 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8699 } else {
8700 /* Mask out read-only status bits. */
8701 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8702 DPLL_PORTC_READY_MASK |
8703 DPLL_PORTB_READY_MASK);
8704 }
8705
8706 if (IS_CHERRYVIEW(dev_priv))
8707 chv_crtc_clock_get(crtc, pipe_config);
8708 else if (IS_VALLEYVIEW(dev_priv))
8709 vlv_crtc_clock_get(crtc, pipe_config);
8710 else
8711 i9xx_crtc_clock_get(crtc, pipe_config);
8712
8713 /*
8714 * Normally the dotclock is filled in by the encoder .get_config()
8715 * but in case the pipe is enabled w/o any ports we need a sane
8716 * default.
8717 */
8718 pipe_config->base.adjusted_mode.crtc_clock =
8719 pipe_config->port_clock / pipe_config->pixel_multiplier;
8720
8721 ret = true;
8722
8723 out:
8724 intel_display_power_put(dev_priv, power_domain, wakeref);
8725
8726 return ret;
8727 }
8728
8729 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8730 {
8731 struct intel_encoder *encoder;
8732 int i;
8733 u32 val, final;
8734 bool has_lvds = false;
8735 bool has_cpu_edp = false;
8736 bool has_panel = false;
8737 bool has_ck505 = false;
8738 bool can_ssc = false;
8739 bool using_ssc_source = false;
8740
8741 /* We need to take the global config into account */
8742 for_each_intel_encoder(&dev_priv->drm, encoder) {
8743 switch (encoder->type) {
8744 case INTEL_OUTPUT_LVDS:
8745 has_panel = true;
8746 has_lvds = true;
8747 break;
8748 case INTEL_OUTPUT_EDP:
8749 has_panel = true;
8750 if (encoder->port == PORT_A)
8751 has_cpu_edp = true;
8752 break;
8753 default:
8754 break;
8755 }
8756 }
8757
8758 if (HAS_PCH_IBX(dev_priv)) {
8759 has_ck505 = dev_priv->vbt.display_clock_mode;
8760 can_ssc = has_ck505;
8761 } else {
8762 has_ck505 = false;
8763 can_ssc = true;
8764 }
8765
8766 /* Check if any DPLLs are using the SSC source */
8767 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8768 u32 temp = I915_READ(PCH_DPLL(i));
8769
8770 if (!(temp & DPLL_VCO_ENABLE))
8771 continue;
8772
8773 if ((temp & PLL_REF_INPUT_MASK) ==
8774 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8775 using_ssc_source = true;
8776 break;
8777 }
8778 }
8779
8780 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8781 has_panel, has_lvds, has_ck505, using_ssc_source);
8782
8783 	/* Ironlake: try to set up the display reference clock before
8784 	 * enabling the DPLLs. This is only under the driver's control
8785 	 * from PCH stepping B onwards; earlier steppings ignore this
8786 	 * setting.
8787 */
8788 val = I915_READ(PCH_DREF_CONTROL);
8789
8790 /* As we must carefully and slowly disable/enable each source in turn,
8791 * compute the final state we want first and check if we need to
8792 * make any changes at all.
8793 */
8794 final = val;
8795 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8796 if (has_ck505)
8797 final |= DREF_NONSPREAD_CK505_ENABLE;
8798 else
8799 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8800
8801 final &= ~DREF_SSC_SOURCE_MASK;
8802 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8803 final &= ~DREF_SSC1_ENABLE;
8804
8805 if (has_panel) {
8806 final |= DREF_SSC_SOURCE_ENABLE;
8807
8808 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8809 final |= DREF_SSC1_ENABLE;
8810
8811 if (has_cpu_edp) {
8812 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8813 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8814 else
8815 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8816 } else
8817 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8818 } else if (using_ssc_source) {
8819 final |= DREF_SSC_SOURCE_ENABLE;
8820 final |= DREF_SSC1_ENABLE;
8821 }
8822
8823 if (final == val)
8824 return;
8825
8826 /* Always enable nonspread source */
8827 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8828
8829 if (has_ck505)
8830 val |= DREF_NONSPREAD_CK505_ENABLE;
8831 else
8832 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8833
8834 if (has_panel) {
8835 val &= ~DREF_SSC_SOURCE_MASK;
8836 val |= DREF_SSC_SOURCE_ENABLE;
8837
8838 /* SSC must be turned on before enabling the CPU output */
8839 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8840 DRM_DEBUG_KMS("Using SSC on panel\n");
8841 val |= DREF_SSC1_ENABLE;
8842 } else
8843 val &= ~DREF_SSC1_ENABLE;
8844
8845 /* Get SSC going before enabling the outputs */
8846 I915_WRITE(PCH_DREF_CONTROL, val);
8847 POSTING_READ(PCH_DREF_CONTROL);
8848 udelay(200);
8849
8850 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8851
8852 /* Enable CPU source on CPU attached eDP */
8853 if (has_cpu_edp) {
8854 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8855 DRM_DEBUG_KMS("Using SSC on eDP\n");
8856 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8857 } else
8858 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8859 } else
8860 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8861
8862 I915_WRITE(PCH_DREF_CONTROL, val);
8863 POSTING_READ(PCH_DREF_CONTROL);
8864 udelay(200);
8865 } else {
8866 DRM_DEBUG_KMS("Disabling CPU source output\n");
8867
8868 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8869
8870 /* Turn off CPU output */
8871 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8872
8873 I915_WRITE(PCH_DREF_CONTROL, val);
8874 POSTING_READ(PCH_DREF_CONTROL);
8875 udelay(200);
8876
8877 if (!using_ssc_source) {
8878 DRM_DEBUG_KMS("Disabling SSC source\n");
8879
8880 /* Turn off the SSC source */
8881 val &= ~DREF_SSC_SOURCE_MASK;
8882 val |= DREF_SSC_SOURCE_DISABLE;
8883
8884 /* Turn off SSC1 */
8885 val &= ~DREF_SSC1_ENABLE;
8886
8887 I915_WRITE(PCH_DREF_CONTROL, val);
8888 POSTING_READ(PCH_DREF_CONTROL);
8889 udelay(200);
8890 }
8891 }
8892
8893 BUG_ON(val != final);
8894 }
8895
8896 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8897 {
8898 u32 tmp;
8899
8900 tmp = I915_READ(SOUTH_CHICKEN2);
8901 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8902 I915_WRITE(SOUTH_CHICKEN2, tmp);
8903
8904 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8905 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8906 DRM_ERROR("FDI mPHY reset assert timeout\n");
8907
8908 tmp = I915_READ(SOUTH_CHICKEN2);
8909 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8910 I915_WRITE(SOUTH_CHICKEN2, tmp);
8911
8912 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8913 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8914 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8915 }
8916
8917 /* WaMPhyProgramming:hsw */
8918 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8919 {
8920 u32 tmp;
8921
8922 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8923 tmp &= ~(0xFF << 24);
8924 tmp |= (0x12 << 24);
8925 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8926
8927 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8928 tmp |= (1 << 11);
8929 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8930
8931 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8932 tmp |= (1 << 11);
8933 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8934
8935 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8936 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8937 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8938
8939 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8940 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8941 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8942
8943 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8944 tmp &= ~(7 << 13);
8945 tmp |= (5 << 13);
8946 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8947
8948 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8949 tmp &= ~(7 << 13);
8950 tmp |= (5 << 13);
8951 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8952
8953 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8954 tmp &= ~0xFF;
8955 tmp |= 0x1C;
8956 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8957
8958 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8959 tmp &= ~0xFF;
8960 tmp |= 0x1C;
8961 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8962
8963 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8964 tmp &= ~(0xFF << 16);
8965 tmp |= (0x1C << 16);
8966 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8967
8968 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8969 tmp &= ~(0xFF << 16);
8970 tmp |= (0x1C << 16);
8971 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8972
8973 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8974 tmp |= (1 << 27);
8975 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8976
8977 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8978 tmp |= (1 << 27);
8979 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8980
8981 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8982 tmp &= ~(0xF << 28);
8983 tmp |= (4 << 28);
8984 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8985
8986 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8987 tmp &= ~(0xF << 28);
8988 tmp |= (4 << 28);
8989 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8990 }
8991
8992 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8993 * Programming" based on the parameters passed:
8994 * - Sequence to enable CLKOUT_DP
8995 * - Sequence to enable CLKOUT_DP without spread
8996 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8997 */
8998 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
8999 bool with_spread, bool with_fdi)
9000 {
9001 u32 reg, tmp;
9002
9003 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9004 with_spread = true;
9005 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9006 with_fdi, "LP PCH doesn't have FDI\n"))
9007 with_fdi = false;
9008
9009 mutex_lock(&dev_priv->sb_lock);
9010
9011 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9012 tmp &= ~SBI_SSCCTL_DISABLE;
9013 tmp |= SBI_SSCCTL_PATHALT;
9014 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9015
9016 udelay(24);
9017
9018 if (with_spread) {
9019 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9020 tmp &= ~SBI_SSCCTL_PATHALT;
9021 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9022
9023 if (with_fdi) {
9024 lpt_reset_fdi_mphy(dev_priv);
9025 lpt_program_fdi_mphy(dev_priv);
9026 }
9027 }
9028
9029 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9030 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9031 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9032 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9033
9034 mutex_unlock(&dev_priv->sb_lock);
9035 }
9036
9037 /* Sequence to disable CLKOUT_DP */
9038 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9039 {
9040 u32 reg, tmp;
9041
9042 mutex_lock(&dev_priv->sb_lock);
9043
9044 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9045 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9046 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9047 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9048
9049 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9050 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9051 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9052 tmp |= SBI_SSCCTL_PATHALT;
9053 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9054 udelay(32);
9055 }
9056 tmp |= SBI_SSCCTL_DISABLE;
9057 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9058 }
9059
9060 mutex_unlock(&dev_priv->sb_lock);
9061 }
9062
9063 #define BEND_IDX(steps) ((50 + (steps)) / 5)
9064
9065 static const u16 sscdivintphase[] = {
9066 [BEND_IDX( 50)] = 0x3B23,
9067 [BEND_IDX( 45)] = 0x3B23,
9068 [BEND_IDX( 40)] = 0x3C23,
9069 [BEND_IDX( 35)] = 0x3C23,
9070 [BEND_IDX( 30)] = 0x3D23,
9071 [BEND_IDX( 25)] = 0x3D23,
9072 [BEND_IDX( 20)] = 0x3E23,
9073 [BEND_IDX( 15)] = 0x3E23,
9074 [BEND_IDX( 10)] = 0x3F23,
9075 [BEND_IDX( 5)] = 0x3F23,
9076 [BEND_IDX( 0)] = 0x0025,
9077 [BEND_IDX( -5)] = 0x0025,
9078 [BEND_IDX(-10)] = 0x0125,
9079 [BEND_IDX(-15)] = 0x0125,
9080 [BEND_IDX(-20)] = 0x0225,
9081 [BEND_IDX(-25)] = 0x0225,
9082 [BEND_IDX(-30)] = 0x0325,
9083 [BEND_IDX(-35)] = 0x0325,
9084 [BEND_IDX(-40)] = 0x0425,
9085 [BEND_IDX(-45)] = 0x0425,
9086 [BEND_IDX(-50)] = 0x0525,
9087 };
9088
9089 /*
9090 * Bend CLKOUT_DP
9091 * steps -50 to 50 inclusive, in steps of 5
9092 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9093 * change in clock period = -(steps / 10) * 5.787 ps
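 * e.g. (illustrative numbers): steps = -20 gives BEND_IDX(-20) = 6 and
 * a period change of -(-20 / 10) * 5.787 ps = +11.574 ps, i.e. a
 * slightly slower clock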
9094 */
9095 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9096 {
9097 u32 tmp;
9098 int idx = BEND_IDX(steps);
9099
9100 if (WARN_ON(steps % 5 != 0))
9101 return;
9102
9103 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9104 return;
9105
9106 mutex_lock(&dev_priv->sb_lock);
9107
9108 if (steps % 10 != 0)
9109 tmp = 0xAAAAAAAB;
9110 else
9111 tmp = 0x00000000;
9112 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9113
9114 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9115 tmp &= 0xffff0000;
9116 tmp |= sscdivintphase[idx];
9117 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9118
9119 mutex_unlock(&dev_priv->sb_lock);
9120 }
9121
9122 #undef BEND_IDX
9123
9124 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9125 {
9126 struct intel_encoder *encoder;
9127 bool has_vga = false;
9128
9129 for_each_intel_encoder(&dev_priv->drm, encoder) {
9130 switch (encoder->type) {
9131 case INTEL_OUTPUT_ANALOG:
9132 has_vga = true;
9133 break;
9134 default:
9135 break;
9136 }
9137 }
9138
9139 if (has_vga) {
9140 lpt_bend_clkout_dp(dev_priv, 0);
9141 lpt_enable_clkout_dp(dev_priv, true, true);
9142 } else {
9143 lpt_disable_clkout_dp(dev_priv);
9144 }
9145 }
9146
9147 /*
9148 * Initialize reference clocks when the driver loads
9149 */
9150 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9151 {
9152 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9153 ironlake_init_pch_refclk(dev_priv);
9154 else if (HAS_PCH_LPT(dev_priv))
9155 lpt_init_pch_refclk(dev_priv);
9156 }
9157
9158 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9159 {
9160 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9161 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9162 enum pipe pipe = crtc->pipe;
9163 u32 val;
9164
9165 val = 0;
9166
9167 switch (crtc_state->pipe_bpp) {
9168 case 18:
9169 val |= PIPECONF_6BPC;
9170 break;
9171 case 24:
9172 val |= PIPECONF_8BPC;
9173 break;
9174 case 30:
9175 val |= PIPECONF_10BPC;
9176 break;
9177 case 36:
9178 val |= PIPECONF_12BPC;
9179 break;
9180 default:
9181 /* Case prevented by intel_choose_pipe_bpp_dither. */
9182 BUG();
9183 }
9184
9185 if (crtc_state->dither)
9186 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9187
9188 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9189 val |= PIPECONF_INTERLACED_ILK;
9190 else
9191 val |= PIPECONF_PROGRESSIVE;
9192
9193 if (crtc_state->limited_color_range)
9194 val |= PIPECONF_COLOR_RANGE_SELECT;
9195
9196 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9197
9198 I915_WRITE(PIPECONF(pipe), val);
9199 POSTING_READ(PIPECONF(pipe));
9200 }
9201
9202 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9203 {
9204 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9205 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9206 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9207 u32 val = 0;
9208
9209 if (IS_HASWELL(dev_priv) && crtc_state->dither)
9210 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9211
9212 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9213 val |= PIPECONF_INTERLACED_ILK;
9214 else
9215 val |= PIPECONF_PROGRESSIVE;
9216
9217 I915_WRITE(PIPECONF(cpu_transcoder), val);
9218 POSTING_READ(PIPECONF(cpu_transcoder));
9219 }
9220
9221 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9222 {
9223 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9224 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9225 u32 val = 0;
9226
9227 switch (crtc_state->pipe_bpp) {
9228 case 18:
9229 val |= PIPEMISC_DITHER_6_BPC;
9230 break;
9231 case 24:
9232 val |= PIPEMISC_DITHER_8_BPC;
9233 break;
9234 case 30:
9235 val |= PIPEMISC_DITHER_10_BPC;
9236 break;
9237 case 36:
9238 val |= PIPEMISC_DITHER_12_BPC;
9239 break;
9240 default:
9241 MISSING_CASE(crtc_state->pipe_bpp);
9242 break;
9243 }
9244
9245 if (crtc_state->dither)
9246 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9247
9248 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9249 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9250 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9251
9252 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9253 val |= PIPEMISC_YUV420_ENABLE |
9254 PIPEMISC_YUV420_MODE_FULL_BLEND;
9255
9256 if (INTEL_GEN(dev_priv) >= 11 &&
9257 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9258 BIT(PLANE_CURSOR))) == 0)
9259 val |= PIPEMISC_HDR_MODE_PRECISION;
9260
9261 I915_WRITE(PIPEMISC(crtc->pipe), val);
9262 }
9263
9264 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9265 {
9266 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9267 u32 tmp;
9268
9269 tmp = I915_READ(PIPEMISC(crtc->pipe));
9270
9271 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9272 case PIPEMISC_DITHER_6_BPC:
9273 return 18;
9274 case PIPEMISC_DITHER_8_BPC:
9275 return 24;
9276 case PIPEMISC_DITHER_10_BPC:
9277 return 30;
9278 case PIPEMISC_DITHER_12_BPC:
9279 return 36;
9280 default:
9281 MISSING_CASE(tmp);
9282 return 0;
9283 }
9284 }
9285
9286 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9287 {
9288 /*
9289 * Account for spread spectrum to avoid
9290 * oversubscribing the link. Max center spread
9291 * is 2.5%; use 5% for safety's sake.
9292 */
9293 u32 bps = target_clock * bpp * 21 / 20;
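/*
 * Worked example (illustrative numbers, not from bspec): a 148500 kHz
 * mode at 24 bpp on a 270000 kHz link gives
 * bps = 148500 * 24 * 21 / 20 = 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */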
9294 return DIV_ROUND_UP(bps, link_bw * 8);
9295 }
9296
9297 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9298 {
9299 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9300 }
9301
9302 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9303 struct intel_crtc_state *crtc_state,
9304 struct dpll *reduced_clock)
9305 {
9306 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9307 u32 dpll, fp, fp2;
9308 int factor;
9309
9310 /* Enable autotuning of the PLL clock (if permissible) */
9311 factor = 21;
9312 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9313 if ((intel_panel_use_ssc(dev_priv) &&
9314 dev_priv->vbt.lvds_ssc_freq == 100000) ||
9315 (HAS_PCH_IBX(dev_priv) &&
9316 intel_is_dual_link_lvds(dev_priv)))
9317 factor = 25;
9318 } else if (crtc_state->sdvo_tv_clock) {
9319 factor = 20;
9320 }
9321
9322 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9323
9324 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9325 fp |= FP_CB_TUNE;
9326
9327 if (reduced_clock) {
9328 fp2 = i9xx_dpll_compute_fp(reduced_clock);
9329
9330 if (reduced_clock->m < factor * reduced_clock->n)
9331 fp2 |= FP_CB_TUNE;
9332 } else {
9333 fp2 = fp;
9334 }
9335
9336 dpll = 0;
9337
9338 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9339 dpll |= DPLLB_MODE_LVDS;
9340 else
9341 dpll |= DPLLB_MODE_DAC_SERIAL;
9342
9343 dpll |= (crtc_state->pixel_multiplier - 1)
9344 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9345
9346 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9347 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9348 dpll |= DPLL_SDVO_HIGH_SPEED;
9349
9350 if (intel_crtc_has_dp_encoder(crtc_state))
9351 dpll |= DPLL_SDVO_HIGH_SPEED;
9352
9353 /*
9354 * The high speed IO clock is only really required for
9355 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9356 * possible to share the DPLL between CRT and HDMI. Enabling
9357 * the clock needlessly does no real harm, apart from
9358 * potentially using up a bit of power.
9359 *
9360 * We'll limit this to IVB with 3 pipes, since it has only two
9361 * DPLLs and so DPLL sharing is the only way to get three pipes
9362 * driving PCH ports at the same time. On SNB we could do this,
9363 * and potentially avoid enabling the second DPLL, but it's not
9364 * clear if it's a win or loss power wise. No point in doing
9365 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9366 */
9367 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9368 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9369 dpll |= DPLL_SDVO_HIGH_SPEED;
9370
9371 /* compute bitmask from p1 value */
9372 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9373 /* also FPA1 */
9374 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
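/* e.g. (illustration): p1 == 3 sets only bit 2 of each P1 field; the
 * post divider is encoded one-hot rather than as a binary value. */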
9375
9376 switch (crtc_state->dpll.p2) {
9377 case 5:
9378 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9379 break;
9380 case 7:
9381 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9382 break;
9383 case 10:
9384 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9385 break;
9386 case 14:
9387 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9388 break;
9389 }
9390
9391 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9392 intel_panel_use_ssc(dev_priv))
9393 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9394 else
9395 dpll |= PLL_REF_INPUT_DREFCLK;
9396
9397 dpll |= DPLL_VCO_ENABLE;
9398
9399 crtc_state->dpll_hw_state.dpll = dpll;
9400 crtc_state->dpll_hw_state.fp0 = fp;
9401 crtc_state->dpll_hw_state.fp1 = fp2;
9402 }
9403
9404 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9405 struct intel_crtc_state *crtc_state)
9406 {
9407 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9408 const struct intel_limit *limit;
9409 int refclk = 120000;
9410
9411 memset(&crtc_state->dpll_hw_state, 0,
9412 sizeof(crtc_state->dpll_hw_state));
9413
9414 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9415 if (!crtc_state->has_pch_encoder)
9416 return 0;
9417
9418 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9419 if (intel_panel_use_ssc(dev_priv)) {
9420 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9421 dev_priv->vbt.lvds_ssc_freq);
9422 refclk = dev_priv->vbt.lvds_ssc_freq;
9423 }
9424
9425 if (intel_is_dual_link_lvds(dev_priv)) {
9426 if (refclk == 100000)
9427 limit = &intel_limits_ironlake_dual_lvds_100m;
9428 else
9429 limit = &intel_limits_ironlake_dual_lvds;
9430 } else {
9431 if (refclk == 100000)
9432 limit = &intel_limits_ironlake_single_lvds_100m;
9433 else
9434 limit = &intel_limits_ironlake_single_lvds;
9435 }
9436 } else {
9437 limit = &intel_limits_ironlake_dac;
9438 }
9439
9440 if (!crtc_state->clock_set &&
9441 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9442 refclk, NULL, &crtc_state->dpll)) {
9443 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9444 return -EINVAL;
9445 }
9446
9447 ironlake_compute_dpll(crtc, crtc_state, NULL);
9448
9449 if (!intel_get_shared_dpll(crtc_state, NULL)) {
9450 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9451 pipe_name(crtc->pipe));
9452 return -EINVAL;
9453 }
9454
9455 return 0;
9456 }
9457
9458 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9459 struct intel_link_m_n *m_n)
9460 {
9461 struct drm_device *dev = crtc->base.dev;
9462 struct drm_i915_private *dev_priv = to_i915(dev);
9463 enum pipe pipe = crtc->pipe;
9464
9465 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9466 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9467 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9468 & ~TU_SIZE_MASK;
9469 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9470 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9471 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9472 }
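/* Note: the TU size field is stored minus one, so e.g. a raw field
 * value of 63 decodes to a transfer unit of 64. */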
9473
9474 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9475 enum transcoder transcoder,
9476 struct intel_link_m_n *m_n,
9477 struct intel_link_m_n *m2_n2)
9478 {
9479 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9480 enum pipe pipe = crtc->pipe;
9481
9482 if (INTEL_GEN(dev_priv) >= 5) {
9483 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9484 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9485 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9486 & ~TU_SIZE_MASK;
9487 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9488 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9489 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9490
9491 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9492 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9493 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9494 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9495 & ~TU_SIZE_MASK;
9496 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9497 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9498 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9499 }
9500 } else {
9501 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9502 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9503 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9504 & ~TU_SIZE_MASK;
9505 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9506 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9507 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9508 }
9509 }
9510
9511 void intel_dp_get_m_n(struct intel_crtc *crtc,
9512 struct intel_crtc_state *pipe_config)
9513 {
9514 if (pipe_config->has_pch_encoder)
9515 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9516 else
9517 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9518 &pipe_config->dp_m_n,
9519 &pipe_config->dp_m2_n2);
9520 }
9521
9522 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9523 struct intel_crtc_state *pipe_config)
9524 {
9525 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9526 &pipe_config->fdi_m_n, NULL);
9527 }
9528
9529 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9530 struct intel_crtc_state *pipe_config)
9531 {
9532 struct drm_device *dev = crtc->base.dev;
9533 struct drm_i915_private *dev_priv = to_i915(dev);
9534 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9535 u32 ps_ctrl = 0;
9536 int id = -1;
9537 int i;
9538
9539 /* find scaler attached to this pipe */
9540 for (i = 0; i < crtc->num_scalers; i++) {
9541 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9542 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9543 id = i;
9544 pipe_config->pch_pfit.enabled = true;
9545 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9546 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9547 scaler_state->scalers[i].in_use = true;
9548 break;
9549 }
9550 }
9551
9552 scaler_state->scaler_id = id;
9553 if (id >= 0) {
9554 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9555 } else {
9556 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9557 }
9558 }
9559
9560 static void
9561 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9562 struct intel_initial_plane_config *plane_config)
9563 {
9564 struct drm_device *dev = crtc->base.dev;
9565 struct drm_i915_private *dev_priv = to_i915(dev);
9566 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9567 enum plane_id plane_id = plane->id;
9568 enum pipe pipe;
9569 u32 val, base, offset, stride_mult, tiling, alpha;
9570 int fourcc, pixel_format;
9571 unsigned int aligned_height;
9572 struct drm_framebuffer *fb;
9573 struct intel_framebuffer *intel_fb;
9574
9575 if (!plane->get_hw_state(plane, &pipe))
9576 return;
9577
9578 WARN_ON(pipe != crtc->pipe);
9579
9580 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9581 if (!intel_fb) {
9582 DRM_DEBUG_KMS("failed to alloc fb\n");
9583 return;
9584 }
9585
9586 fb = &intel_fb->base;
9587
9588 fb->dev = dev;
9589
9590 val = I915_READ(PLANE_CTL(pipe, plane_id));
9591
9592 if (INTEL_GEN(dev_priv) >= 11)
9593 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9594 else
9595 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9596
9597 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9598 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9599 alpha &= PLANE_COLOR_ALPHA_MASK;
9600 } else {
9601 alpha = val & PLANE_CTL_ALPHA_MASK;
9602 }
9603
9604 fourcc = skl_format_to_fourcc(pixel_format,
9605 val & PLANE_CTL_ORDER_RGBX, alpha);
9606 fb->format = drm_format_info(fourcc);
9607
9608 tiling = val & PLANE_CTL_TILED_MASK;
9609 switch (tiling) {
9610 case PLANE_CTL_TILED_LINEAR:
9611 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9612 break;
9613 case PLANE_CTL_TILED_X:
9614 plane_config->tiling = I915_TILING_X;
9615 fb->modifier = I915_FORMAT_MOD_X_TILED;
9616 break;
9617 case PLANE_CTL_TILED_Y:
9618 plane_config->tiling = I915_TILING_Y;
9619 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9620 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9621 else
9622 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9623 break;
9624 case PLANE_CTL_TILED_YF:
9625 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9626 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9627 else
9628 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9629 break;
9630 default:
9631 MISSING_CASE(tiling);
9632 goto error;
9633 }
9634
9635 /*
9636 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
9637 * while i915 HW rotation is clockwise; that's why we do this swapping.
9638 */
9639 switch (val & PLANE_CTL_ROTATE_MASK) {
9640 case PLANE_CTL_ROTATE_0:
9641 plane_config->rotation = DRM_MODE_ROTATE_0;
9642 break;
9643 case PLANE_CTL_ROTATE_90:
9644 plane_config->rotation = DRM_MODE_ROTATE_270;
9645 break;
9646 case PLANE_CTL_ROTATE_180:
9647 plane_config->rotation = DRM_MODE_ROTATE_180;
9648 break;
9649 case PLANE_CTL_ROTATE_270:
9650 plane_config->rotation = DRM_MODE_ROTATE_90;
9651 break;
9652 }
9653
9654 if (INTEL_GEN(dev_priv) >= 10 &&
9655 val & PLANE_CTL_FLIP_HORIZONTAL)
9656 plane_config->rotation |= DRM_MODE_REFLECT_X;
9657
9658 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9659 plane_config->base = base;
9660
9661 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9662
9663 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9664 fb->height = ((val >> 16) & 0xfff) + 1;
9665 fb->width = ((val >> 0) & 0x1fff) + 1;
9666
9667 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9668 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9669 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9670
9671 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9672
9673 plane_config->size = fb->pitches[0] * aligned_height;
9674
9675 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9676 crtc->base.name, plane->base.name, fb->width, fb->height,
9677 fb->format->cpp[0] * 8, base, fb->pitches[0],
9678 plane_config->size);
9679
9680 plane_config->fb = intel_fb;
9681 return;
9682
9683 error:
9684 kfree(intel_fb);
9685 }
9686
9687 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9688 struct intel_crtc_state *pipe_config)
9689 {
9690 struct drm_device *dev = crtc->base.dev;
9691 struct drm_i915_private *dev_priv = to_i915(dev);
9692 u32 tmp;
9693
9694 tmp = I915_READ(PF_CTL(crtc->pipe));
9695
9696 if (tmp & PF_ENABLE) {
9697 pipe_config->pch_pfit.enabled = true;
9698 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9699 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9700
9701 /* We currently do not free assignments of panel fitters on
9702 * ivb/hsw (since we don't use the higher upscaling modes which
9703 * differentiate them) so just WARN about this case for now. */
9704 if (IS_GEN(dev_priv, 7)) {
9705 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9706 PF_PIPE_SEL_IVB(crtc->pipe));
9707 }
9708 }
9709 }
9710
9711 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9712 struct intel_crtc_state *pipe_config)
9713 {
9714 struct drm_device *dev = crtc->base.dev;
9715 struct drm_i915_private *dev_priv = to_i915(dev);
9716 enum intel_display_power_domain power_domain;
9717 intel_wakeref_t wakeref;
9718 u32 tmp;
9719 bool ret;
9720
9721 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9722 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9723 if (!wakeref)
9724 return false;
9725
9726 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9727 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9728 pipe_config->shared_dpll = NULL;
9729
9730 ret = false;
9731 tmp = I915_READ(PIPECONF(crtc->pipe));
9732 if (!(tmp & PIPECONF_ENABLE))
9733 goto out;
9734
9735 switch (tmp & PIPECONF_BPC_MASK) {
9736 case PIPECONF_6BPC:
9737 pipe_config->pipe_bpp = 18;
9738 break;
9739 case PIPECONF_8BPC:
9740 pipe_config->pipe_bpp = 24;
9741 break;
9742 case PIPECONF_10BPC:
9743 pipe_config->pipe_bpp = 30;
9744 break;
9745 case PIPECONF_12BPC:
9746 pipe_config->pipe_bpp = 36;
9747 break;
9748 default:
9749 break;
9750 }
9751
9752 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9753 pipe_config->limited_color_range = true;
9754
9755 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9756 PIPECONF_GAMMA_MODE_SHIFT;
9757
9758 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9759
9760 i9xx_get_pipe_color_config(pipe_config);
9761
9762 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9763 struct intel_shared_dpll *pll;
9764 enum intel_dpll_id pll_id;
9765
9766 pipe_config->has_pch_encoder = true;
9767
9768 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9769 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9770 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9771
9772 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9773
9774 if (HAS_PCH_IBX(dev_priv)) {
9775 /*
9776 * The pipe->pch transcoder and pch transcoder->pll
9777 * mapping is fixed.
9778 */
9779 pll_id = (enum intel_dpll_id) crtc->pipe;
9780 } else {
9781 tmp = I915_READ(PCH_DPLL_SEL);
9782 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9783 pll_id = DPLL_ID_PCH_PLL_B;
9784 else
9785 pll_id = DPLL_ID_PCH_PLL_A;
9786 }
9787
9788 pipe_config->shared_dpll =
9789 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9790 pll = pipe_config->shared_dpll;
9791
9792 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9793 &pipe_config->dpll_hw_state));
9794
9795 tmp = pipe_config->dpll_hw_state.dpll;
9796 pipe_config->pixel_multiplier =
9797 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9798 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9799
9800 ironlake_pch_clock_get(crtc, pipe_config);
9801 } else {
9802 pipe_config->pixel_multiplier = 1;
9803 }
9804
9805 intel_get_pipe_timings(crtc, pipe_config);
9806 intel_get_pipe_src_size(crtc, pipe_config);
9807
9808 ironlake_get_pfit_config(crtc, pipe_config);
9809
9810 ret = true;
9811
9812 out:
9813 intel_display_power_put(dev_priv, power_domain, wakeref);
9814
9815 return ret;
9816 }

9817 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9818 struct intel_crtc_state *crtc_state)
9819 {
9820 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9821 struct intel_atomic_state *state =
9822 to_intel_atomic_state(crtc_state->base.state);
9823
9824 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9825 INTEL_GEN(dev_priv) >= 11) {
9826 struct intel_encoder *encoder =
9827 intel_get_crtc_new_encoder(state, crtc_state);
9828
9829 if (!intel_get_shared_dpll(crtc_state, encoder)) {
9830 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9831 pipe_name(crtc->pipe));
9832 return -EINVAL;
9833 }
9834 }
9835
9836 return 0;
9837 }
9838
9839 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9840 enum port port,
9841 struct intel_crtc_state *pipe_config)
9842 {
9843 enum intel_dpll_id id;
9844 u32 temp;
9845
9846 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9847 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9848
9849 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9850 return;
9851
9852 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9853 }
9854
9855 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9856 enum port port,
9857 struct intel_crtc_state *pipe_config)
9858 {
9859 enum intel_dpll_id id;
9860 u32 temp;
9861
9862 /* TODO: TBT pll not implemented. */
9863 if (intel_port_is_combophy(dev_priv, port)) {
9864 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9865 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9866 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9867 } else if (intel_port_is_tc(dev_priv, port)) {
9868 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9869 } else {
9870 WARN(1, "Invalid port %x\n", port);
9871 return;
9872 }
9873
9874 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9875 }
9876
9877 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9878 enum port port,
9879 struct intel_crtc_state *pipe_config)
9880 {
9881 enum intel_dpll_id id;
9882
9883 switch (port) {
9884 case PORT_A:
9885 id = DPLL_ID_SKL_DPLL0;
9886 break;
9887 case PORT_B:
9888 id = DPLL_ID_SKL_DPLL1;
9889 break;
9890 case PORT_C:
9891 id = DPLL_ID_SKL_DPLL2;
9892 break;
9893 default:
9894 DRM_ERROR("Incorrect port type\n");
9895 return;
9896 }
9897
9898 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9899 }
9900
9901 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9902 enum port port,
9903 struct intel_crtc_state *pipe_config)
9904 {
9905 enum intel_dpll_id id;
9906 u32 temp;
9907
9908 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9909 id = temp >> (port * 3 + 1);
9910
9911 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9912 return;
9913
9914 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9915 }
9916
9917 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9918 enum port port,
9919 struct intel_crtc_state *pipe_config)
9920 {
9921 enum intel_dpll_id id;
9922 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9923
9924 switch (ddi_pll_sel) {
9925 case PORT_CLK_SEL_WRPLL1:
9926 id = DPLL_ID_WRPLL1;
9927 break;
9928 case PORT_CLK_SEL_WRPLL2:
9929 id = DPLL_ID_WRPLL2;
9930 break;
9931 case PORT_CLK_SEL_SPLL:
9932 id = DPLL_ID_SPLL;
9933 break;
9934 case PORT_CLK_SEL_LCPLL_810:
9935 id = DPLL_ID_LCPLL_810;
9936 break;
9937 case PORT_CLK_SEL_LCPLL_1350:
9938 id = DPLL_ID_LCPLL_1350;
9939 break;
9940 case PORT_CLK_SEL_LCPLL_2700:
9941 id = DPLL_ID_LCPLL_2700;
9942 break;
9943 default:
9944 MISSING_CASE(ddi_pll_sel);
9945 /* fall through */
9946 case PORT_CLK_SEL_NONE:
9947 return;
9948 }
9949
9950 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9951 }
9952
9953 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9954 struct intel_crtc_state *pipe_config,
9955 u64 *power_domain_mask,
9956 intel_wakeref_t *wakerefs)
9957 {
9958 struct drm_device *dev = crtc->base.dev;
9959 struct drm_i915_private *dev_priv = to_i915(dev);
9960 enum intel_display_power_domain power_domain;
9961 unsigned long panel_transcoder_mask = 0;
9962 unsigned long enabled_panel_transcoders = 0;
9963 enum transcoder panel_transcoder;
9964 intel_wakeref_t wf;
9965 u32 tmp;
9966
9967 if (INTEL_GEN(dev_priv) >= 11)
9968 panel_transcoder_mask |=
9969 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
9970
9971 if (HAS_TRANSCODER_EDP(dev_priv))
9972 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
9973
9974 /*
9975 * The pipe->transcoder mapping is fixed with the exception of the eDP
9976 * and DSI transcoders handled below.
9977 */
9978 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9979
9980 /*
9981 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9982 * consistency and less surprising code; it's in an always-on power well).
9983 */
9984 for_each_set_bit(panel_transcoder,
9985 &panel_transcoder_mask,
9986 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
9987 bool force_thru = false;
9988 enum pipe trans_pipe;
9989
9990 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9991 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9992 continue;
9993
9994 /*
9995 * Log all enabled ones, but only use the first one.
9996 *
9997 * FIXME: This won't work for two separate DSI displays.
9998 */
9999 enabled_panel_transcoders |= BIT(panel_transcoder);
10000 if (enabled_panel_transcoders != BIT(panel_transcoder))
10001 continue;
10002
10003 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10004 default:
10005 WARN(1, "unknown pipe linked to transcoder %s\n",
10006 transcoder_name(panel_transcoder));
10007 /* fall through */
10008 case TRANS_DDI_EDP_INPUT_A_ONOFF:
10009 force_thru = true;
10010 /* fall through */
10011 case TRANS_DDI_EDP_INPUT_A_ON:
10012 trans_pipe = PIPE_A;
10013 break;
10014 case TRANS_DDI_EDP_INPUT_B_ONOFF:
10015 trans_pipe = PIPE_B;
10016 break;
10017 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10018 trans_pipe = PIPE_C;
10019 break;
10020 }
10021
10022 if (trans_pipe == crtc->pipe) {
10023 pipe_config->cpu_transcoder = panel_transcoder;
10024 pipe_config->pch_pfit.force_thru = force_thru;
10025 }
10026 }
10027
10028 /*
10029 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10030 */
10031 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10032 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10033
10034 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10035 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10036
10037 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10038 if (!wf)
10039 return false;
10040
10041 wakerefs[power_domain] = wf;
10042 *power_domain_mask |= BIT_ULL(power_domain);
10043
10044 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10045
10046 return tmp & PIPECONF_ENABLE;
10047 }
10048
10049 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10050 struct intel_crtc_state *pipe_config,
10051 u64 *power_domain_mask,
10052 intel_wakeref_t *wakerefs)
10053 {
10054 struct drm_device *dev = crtc->base.dev;
10055 struct drm_i915_private *dev_priv = to_i915(dev);
10056 enum intel_display_power_domain power_domain;
10057 enum transcoder cpu_transcoder;
10058 intel_wakeref_t wf;
10059 enum port port;
10060 u32 tmp;
10061
10062 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10063 if (port == PORT_A)
10064 cpu_transcoder = TRANSCODER_DSI_A;
10065 else
10066 cpu_transcoder = TRANSCODER_DSI_C;
10067
10068 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10069 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10070
10071 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10072 if (!wf)
10073 continue;
10074
10075 wakerefs[power_domain] = wf;
10076 *power_domain_mask |= BIT_ULL(power_domain);
10077
10078 /*
10079 * The PLL needs to be enabled with a valid divider
10080 * configuration, otherwise accessing DSI registers will hang
10081 * the machine. See BSpec North Display Engine
10082 * registers/MIPI[BXT]. We can break out here early, since we
10083 * need the same DSI PLL to be enabled for both DSI ports.
10084 */
10085 if (!bxt_dsi_pll_is_enabled(dev_priv))
10086 break;
10087
10088 /* XXX: this works for video mode only */
10089 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10090 if (!(tmp & DPI_ENABLE))
10091 continue;
10092
10093 tmp = I915_READ(MIPI_CTRL(port));
10094 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10095 continue;
10096
10097 pipe_config->cpu_transcoder = cpu_transcoder;
10098 break;
10099 }
10100
10101 return transcoder_is_dsi(pipe_config->cpu_transcoder);
10102 }
10103
10104 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10105 struct intel_crtc_state *pipe_config)
10106 {
10107 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10108 struct intel_shared_dpll *pll;
10109 enum port port;
10110 u32 tmp;
10111
10112 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10113
10114 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
10115
10116 if (INTEL_GEN(dev_priv) >= 11)
10117 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10118 else if (IS_CANNONLAKE(dev_priv))
10119 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10120 else if (IS_GEN9_BC(dev_priv))
10121 skylake_get_ddi_pll(dev_priv, port, pipe_config);
10122 else if (IS_GEN9_LP(dev_priv))
10123 bxt_get_ddi_pll(dev_priv, port, pipe_config);
10124 else
10125 haswell_get_ddi_pll(dev_priv, port, pipe_config);
10126
10127 pll = pipe_config->shared_dpll;
10128 if (pll) {
10129 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10130 &pipe_config->dpll_hw_state));
10131 }
10132
10133 /*
10134 * Haswell has only FDI/PCH transcoder A, which is connected to
10135 * DDI E. So just check whether this pipe is wired to DDI E and whether
10136 * the PCH transcoder is on.
10137 */
10138 if (INTEL_GEN(dev_priv) < 9 &&
10139 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10140 pipe_config->has_pch_encoder = true;
10141
10142 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10143 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10144 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10145
10146 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10147 }
10148 }
10149
10150 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10151 struct intel_crtc_state *pipe_config)
10152 {
10153 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10154 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10155 enum intel_display_power_domain power_domain;
10156 u64 power_domain_mask;
10157 bool active;
10158
10159 intel_crtc_init_scalers(crtc, pipe_config);
10160
10161 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10162 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10163 if (!wf)
10164 return false;
10165
10166 wakerefs[power_domain] = wf;
10167 power_domain_mask = BIT_ULL(power_domain);
10168
10169 pipe_config->shared_dpll = NULL;
10170
10171 active = hsw_get_transcoder_state(crtc, pipe_config,
10172 &power_domain_mask, wakerefs);
10173
10174 if (IS_GEN9_LP(dev_priv) &&
10175 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10176 &power_domain_mask, wakerefs)) {
10177 WARN_ON(active);
10178 active = true;
10179 }
10180
10181 if (!active)
10182 goto out;
10183
10184 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10185 INTEL_GEN(dev_priv) >= 11) {
10186 haswell_get_ddi_port_state(crtc, pipe_config);
10187 intel_get_pipe_timings(crtc, pipe_config);
10188 }
10189
10190 intel_get_pipe_src_size(crtc, pipe_config);
10191 intel_get_crtc_ycbcr_config(crtc, pipe_config);
10192
10193 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10194
10195 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10196
10197 if (INTEL_GEN(dev_priv) >= 9) {
10198 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10199
10200 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10201 pipe_config->gamma_enable = true;
10202
10203 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10204 pipe_config->csc_enable = true;
10205 } else {
10206 i9xx_get_pipe_color_config(pipe_config);
10207 }
10208
10209 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10210 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10211
10212 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10213 if (wf) {
10214 wakerefs[power_domain] = wf;
10215 power_domain_mask |= BIT_ULL(power_domain);
10216
10217 if (INTEL_GEN(dev_priv) >= 9)
10218 skylake_get_pfit_config(crtc, pipe_config);
10219 else
10220 ironlake_get_pfit_config(crtc, pipe_config);
10221 }
10222
10223 if (hsw_crtc_supports_ips(crtc)) {
10224 if (IS_HASWELL(dev_priv))
10225 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10226 else {
10227 /*
10228 * We cannot readout IPS state on broadwell, set to
10229 * true so we can set it to a defined state on first
10230 * commit.
10231 */
10232 pipe_config->ips_enabled = true;
10233 }
10234 }
10235
10236 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10237 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10238 pipe_config->pixel_multiplier =
10239 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10240 } else {
10241 pipe_config->pixel_multiplier = 1;
10242 }
10243
10244 out:
10245 for_each_power_domain(power_domain, power_domain_mask)
10246 intel_display_power_put(dev_priv,
10247 power_domain, wakerefs[power_domain]);
10248
10249 return active;
10250 }
10251
10252 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10253 {
10254 struct drm_i915_private *dev_priv =
10255 to_i915(plane_state->base.plane->dev);
10256 const struct drm_framebuffer *fb = plane_state->base.fb;
10257 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10258 u32 base;
10259
10260 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10261 base = obj->phys_handle->busaddr;
10262 else
10263 base = intel_plane_ggtt_offset(plane_state);
10264
10265 base += plane_state->color_plane[0].offset;
10266
10267 /* ILK+ do this automagically */
10268 if (HAS_GMCH(dev_priv) &&
10269 plane_state->base.rotation & DRM_MODE_ROTATE_180)
10270 base += (plane_state->base.crtc_h *
10271 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
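/* e.g. (illustration): a 64x64 ARGB cursor (cpp == 4) moves base
 * forward by (64 * 64 - 1) * 4 bytes, i.e. to its last pixel. */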
10272
10273 return base;
10274 }
10275
10276 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10277 {
10278 int x = plane_state->base.crtc_x;
10279 int y = plane_state->base.crtc_y;
10280 u32 pos = 0;
10281
10282 if (x < 0) {
10283 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10284 x = -x;
10285 }
10286 pos |= x << CURSOR_X_SHIFT;
10287
10288 if (y < 0) {
10289 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10290 y = -y;
10291 }
10292 pos |= y << CURSOR_Y_SHIFT;
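/* e.g. (illustration): x = -12, y = 34 encodes sign-magnitude as
 * (CURSOR_POS_SIGN | 12) in the X field and 34 in the Y field. */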
10293
10294 return pos;
10295 }
10296
10297 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10298 {
10299 const struct drm_mode_config *config =
10300 &plane_state->base.plane->dev->mode_config;
10301 int width = plane_state->base.crtc_w;
10302 int height = plane_state->base.crtc_h;
10303
10304 return width > 0 && width <= config->cursor_width &&
10305 height > 0 && height <= config->cursor_height;
10306 }
10307
10308 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10309 {
10310 int src_x, src_y;
10311 u32 offset;
10312 int ret;
10313
10314 ret = intel_plane_compute_gtt(plane_state);
10315 if (ret)
10316 return ret;
10317
10318 if (!plane_state->base.visible)
10319 return 0;
10320
10321 src_x = plane_state->base.src_x >> 16;
10322 src_y = plane_state->base.src_y >> 16;
10323
10324 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10325 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10326 plane_state, 0);
10327
10328 if (src_x != 0 || src_y != 0) {
10329 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10330 return -EINVAL;
10331 }
10332
10333 plane_state->color_plane[0].offset = offset;
10334
10335 return 0;
10336 }
10337
10338 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10339 struct intel_plane_state *plane_state)
10340 {
10341 const struct drm_framebuffer *fb = plane_state->base.fb;
10342 int ret;
10343
10344 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10345 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10346 return -EINVAL;
10347 }
10348
10349 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10350 &crtc_state->base,
10351 DRM_PLANE_HELPER_NO_SCALING,
10352 DRM_PLANE_HELPER_NO_SCALING,
10353 true, true);
10354 if (ret)
10355 return ret;
10356
10357 ret = intel_cursor_check_surface(plane_state);
10358 if (ret)
10359 return ret;
10360
10361 if (!plane_state->base.visible)
10362 return 0;
10363
10364 ret = intel_plane_check_src_coordinates(plane_state);
10365 if (ret)
10366 return ret;
10367
10368 return 0;
10369 }
10370
10371 static unsigned int
10372 i845_cursor_max_stride(struct intel_plane *plane,
10373 u32 pixel_format, u64 modifier,
10374 unsigned int rotation)
10375 {
10376 return 2048;
10377 }
10378
10379 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10380 {
10381 u32 cntl = 0;
10382
10383 if (crtc_state->gamma_enable)
10384 cntl |= CURSOR_GAMMA_ENABLE;
10385
10386 return cntl;
10387 }
10388
10389 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10390 const struct intel_plane_state *plane_state)
10391 {
10392 return CURSOR_ENABLE |
10393 CURSOR_FORMAT_ARGB |
10394 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10395 }
10396
10397 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10398 {
10399 int width = plane_state->base.crtc_w;
10400
10401 /*
10402 * 845g/865g are only limited by the width of their cursors;
10403 * the height is arbitrary up to the precision of the register.
10404 */
10405 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10406 }
10407
10408 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10409 struct intel_plane_state *plane_state)
10410 {
10411 const struct drm_framebuffer *fb = plane_state->base.fb;
10412 int ret;
10413
10414 ret = intel_check_cursor(crtc_state, plane_state);
10415 if (ret)
10416 return ret;
10417
10418 /* if we want to turn off the cursor ignore width and height */
10419 if (!fb)
10420 return 0;
10421
10422 /* Check for which cursor types we support */
10423 if (!i845_cursor_size_ok(plane_state)) {
10424 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10425 plane_state->base.crtc_w,
10426 plane_state->base.crtc_h);
10427 return -EINVAL;
10428 }
10429
10430 WARN_ON(plane_state->base.visible &&
10431 plane_state->color_plane[0].stride != fb->pitches[0]);
10432
10433 switch (fb->pitches[0]) {
10434 case 256:
10435 case 512:
10436 case 1024:
10437 case 2048:
10438 break;
10439 default:
10440 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10441 fb->pitches[0]);
10442 return -EINVAL;
10443 }
10444
10445 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10446
10447 return 0;
10448 }
10449
10450 static void i845_update_cursor(struct intel_plane *plane,
10451 const struct intel_crtc_state *crtc_state,
10452 const struct intel_plane_state *plane_state)
10453 {
10454 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10455 u32 cntl = 0, base = 0, pos = 0, size = 0;
10456 unsigned long irqflags;
10457
10458 if (plane_state && plane_state->base.visible) {
10459 unsigned int width = plane_state->base.crtc_w;
10460 unsigned int height = plane_state->base.crtc_h;
10461
10462 cntl = plane_state->ctl |
10463 i845_cursor_ctl_crtc(crtc_state);
10464
10465 size = (height << 12) | width;
10466
10467 base = intel_cursor_base(plane_state);
10468 pos = intel_cursor_position(plane_state);
10469 }
10470
10471 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10472
10473 /* On these chipsets we can only modify the base/size/stride
10474 * whilst the cursor is disabled.
10475 */
10476 if (plane->cursor.base != base ||
10477 plane->cursor.size != size ||
10478 plane->cursor.cntl != cntl) {
10479 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10480 I915_WRITE_FW(CURBASE(PIPE_A), base);
10481 I915_WRITE_FW(CURSIZE, size);
10482 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10483 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10484
10485 plane->cursor.base = base;
10486 plane->cursor.size = size;
10487 plane->cursor.cntl = cntl;
10488 } else {
10489 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10490 }
10491
10492 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10493 }
10494
10495 static void i845_disable_cursor(struct intel_plane *plane,
10496 const struct intel_crtc_state *crtc_state)
10497 {
10498 i845_update_cursor(plane, crtc_state, NULL);
10499 }
10500
10501 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10502 enum pipe *pipe)
10503 {
10504 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10505 enum intel_display_power_domain power_domain;
10506 intel_wakeref_t wakeref;
10507 bool ret;
10508
10509 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10510 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10511 if (!wakeref)
10512 return false;
10513
10514 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10515
10516 *pipe = PIPE_A;
10517
10518 intel_display_power_put(dev_priv, power_domain, wakeref);
10519
10520 return ret;
10521 }
10522
10523 static unsigned int
10524 i9xx_cursor_max_stride(struct intel_plane *plane,
10525 u32 pixel_format, u64 modifier,
10526 unsigned int rotation)
10527 {
10528 return plane->base.dev->mode_config.cursor_width * 4;
10529 }
10530
10531 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10532 {
10533 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10534 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10535 u32 cntl = 0;
10536
10537 if (INTEL_GEN(dev_priv) >= 11)
10538 return cntl;
10539
10540 if (crtc_state->gamma_enable)
10541 cntl = MCURSOR_GAMMA_ENABLE;
10542
10543 if (crtc_state->csc_enable)
10544 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10545
10546 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10547 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10548
10549 return cntl;
10550 }
10551
10552 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10553 const struct intel_plane_state *plane_state)
10554 {
10555 struct drm_i915_private *dev_priv =
10556 to_i915(plane_state->base.plane->dev);
10557 u32 cntl = 0;
10558
10559 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10560 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10561
10562 switch (plane_state->base.crtc_w) {
10563 case 64:
10564 cntl |= MCURSOR_MODE_64_ARGB_AX;
10565 break;
10566 case 128:
10567 cntl |= MCURSOR_MODE_128_ARGB_AX;
10568 break;
10569 case 256:
10570 cntl |= MCURSOR_MODE_256_ARGB_AX;
10571 break;
10572 default:
10573 MISSING_CASE(plane_state->base.crtc_w);
10574 return 0;
10575 }
10576
10577 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10578 cntl |= MCURSOR_ROTATE_180;
10579
10580 return cntl;
10581 }
10582
10583 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10584 {
10585 struct drm_i915_private *dev_priv =
10586 to_i915(plane_state->base.plane->dev);
10587 int width = plane_state->base.crtc_w;
10588 int height = plane_state->base.crtc_h;
10589
10590 if (!intel_cursor_size_ok(plane_state))
10591 return false;
10592
10593 /* Cursor width is limited to a few power-of-two sizes */
10594 switch (width) {
10595 case 256:
10596 case 128:
10597 case 64:
10598 break;
10599 default:
10600 return false;
10601 }
10602
10603 /*
10604 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10605 * height from 8 lines up to the cursor width, when the
10606 * cursor is not rotated. Everything else requires square
10607 * cursors.
10608 */
10609 if (HAS_CUR_FBC(dev_priv) &&
10610 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10611 if (height < 8 || height > width)
10612 return false;
10613 } else {
10614 if (height != width)
10615 return false;
10616 }
10617
10618 return true;
10619 }
10620
10621 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10622 struct intel_plane_state *plane_state)
10623 {
10624 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10625 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10626 const struct drm_framebuffer *fb = plane_state->base.fb;
10627 enum pipe pipe = plane->pipe;
10628 int ret;
10629
10630 ret = intel_check_cursor(crtc_state, plane_state);
10631 if (ret)
10632 return ret;
10633
10634 /* if we want to turn off the cursor ignore width and height */
10635 if (!fb)
10636 return 0;
10637
10638 /* Check for which cursor types we support */
10639 if (!i9xx_cursor_size_ok(plane_state)) {
10640 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10641 plane_state->base.crtc_w,
10642 plane_state->base.crtc_h);
10643 return -EINVAL;
10644 }
10645
10646 WARN_ON(plane_state->base.visible &&
10647 plane_state->color_plane[0].stride != fb->pitches[0]);
10648
10649 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10650 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10651 fb->pitches[0], plane_state->base.crtc_w);
10652 return -EINVAL;
10653 }
10654
10655 /*
10656 * There's something wrong with the cursor on CHV pipe C.
10657 * If it straddles the left edge of the screen then
10658 * moving it away from the edge or disabling it often
10659 * results in a pipe underrun, and often that can lead to
10660 * dead pipe (constant underrun reported, and it scans
10661 * out just a solid color). To recover from that, the
10662 * display power well must be turned off and on again.
10663 * Refuse to put the cursor into that compromised position.
10664 */
10665 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10666 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10667 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10668 return -EINVAL;
10669 }
10670
10671 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10672
10673 return 0;
10674 }
10675
10676 static void i9xx_update_cursor(struct intel_plane *plane,
10677 const struct intel_crtc_state *crtc_state,
10678 const struct intel_plane_state *plane_state)
10679 {
10680 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10681 enum pipe pipe = plane->pipe;
10682 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10683 unsigned long irqflags;
10684
10685 if (plane_state && plane_state->base.visible) {
10686 cntl = plane_state->ctl |
10687 i9xx_cursor_ctl_crtc(crtc_state);
10688
10689 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10690 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10691
10692 base = intel_cursor_base(plane_state);
10693 pos = intel_cursor_position(plane_state);
10694 }
10695
10696 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10697
10698 /*
10699 * On some platforms writing CURCNTR first will also
10700 * cause CURPOS to be armed by the CURBASE write.
10701 * Without the CURCNTR write the CURPOS write would
10702 * arm itself. Thus we always update CURCNTR before
10703 * CURPOS.
10704 *
10705 * On other platforms CURPOS always requires the
10706 * CURBASE write to arm the update. Additionally
10707 * a write to any of the cursor registers will cancel
10708 * an already armed cursor update. Thus leaving out
10709 * the CURBASE write after CURPOS could lead to a
10710 * cursor that doesn't appear to move, or even change
10711 * shape. Thus we always write CURBASE.
10712 *
10713 * The other registers are armed by the CURBASE write
10714 * except when the plane is getting enabled at which time
10715 * the CURCNTR write arms the update.
10716 */
10717
10718 if (INTEL_GEN(dev_priv) >= 9)
10719 skl_write_cursor_wm(plane, crtc_state);
10720
10721 if (plane->cursor.base != base ||
10722 plane->cursor.size != fbc_ctl ||
10723 plane->cursor.cntl != cntl) {
10724 if (HAS_CUR_FBC(dev_priv))
10725 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10726 I915_WRITE_FW(CURCNTR(pipe), cntl);
10727 I915_WRITE_FW(CURPOS(pipe), pos);
10728 I915_WRITE_FW(CURBASE(pipe), base);
10729
10730 plane->cursor.base = base;
10731 plane->cursor.size = fbc_ctl;
10732 plane->cursor.cntl = cntl;
10733 } else {
10734 I915_WRITE_FW(CURPOS(pipe), pos);
10735 I915_WRITE_FW(CURBASE(pipe), base);
10736 }
10737
10738 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10739 }
10740
10741 static void i9xx_disable_cursor(struct intel_plane *plane,
10742 const struct intel_crtc_state *crtc_state)
10743 {
10744 i9xx_update_cursor(plane, crtc_state, NULL);
10745 }
10746
10747 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10748 enum pipe *pipe)
10749 {
10750 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10751 enum intel_display_power_domain power_domain;
10752 intel_wakeref_t wakeref;
10753 bool ret;
10754 u32 val;
10755
10756 /*
10757 * Not 100% correct for planes that can move between pipes,
10758 * but that's only the case for gen2-3 which don't have any
10759 * display power wells.
10760 */
10761 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10762 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10763 if (!wakeref)
10764 return false;
10765
10766 val = I915_READ(CURCNTR(plane->pipe));
10767
10768 ret = val & MCURSOR_MODE;
10769
10770 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10771 *pipe = plane->pipe;
10772 else
10773 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10774 MCURSOR_PIPE_SELECT_SHIFT;
10775
10776 intel_display_power_put(dev_priv, power_domain, wakeref);
10777
10778 return ret;
10779 }
10780
10781 /* VESA 640x480x72Hz mode to set on the pipe */
10782 static const struct drm_display_mode load_detect_mode = {
10783 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10784 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10785 };
10786
10787 struct drm_framebuffer *
10788 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10789 struct drm_mode_fb_cmd2 *mode_cmd)
10790 {
10791 struct intel_framebuffer *intel_fb;
10792 int ret;
10793
10794 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10795 if (!intel_fb)
10796 return ERR_PTR(-ENOMEM);
10797
10798 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10799 if (ret)
10800 goto err;
10801
10802 return &intel_fb->base;
10803
10804 err:
10805 kfree(intel_fb);
10806 return ERR_PTR(ret);
10807 }
10808
10809 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10810 struct drm_crtc *crtc)
10811 {
10812 struct drm_plane *plane;
10813 struct drm_plane_state *plane_state;
10814 int ret, i;
10815
10816 ret = drm_atomic_add_affected_planes(state, crtc);
10817 if (ret)
10818 return ret;
10819
10820 for_each_new_plane_in_state(state, plane, plane_state, i) {
10821 if (plane_state->crtc != crtc)
10822 continue;
10823
10824 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10825 if (ret)
10826 return ret;
10827
10828 drm_atomic_set_fb_for_plane(plane_state, NULL);
10829 }
10830
10831 return 0;
10832 }
10833
10834 int intel_get_load_detect_pipe(struct drm_connector *connector,
10835 const struct drm_display_mode *mode,
10836 struct intel_load_detect_pipe *old,
10837 struct drm_modeset_acquire_ctx *ctx)
10838 {
10839 struct intel_crtc *intel_crtc;
10840 struct intel_encoder *intel_encoder =
10841 intel_attached_encoder(connector);
10842 struct drm_crtc *possible_crtc;
10843 struct drm_encoder *encoder = &intel_encoder->base;
10844 struct drm_crtc *crtc = NULL;
10845 struct drm_device *dev = encoder->dev;
10846 struct drm_i915_private *dev_priv = to_i915(dev);
10847 struct drm_mode_config *config = &dev->mode_config;
10848 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10849 struct drm_connector_state *connector_state;
10850 struct intel_crtc_state *crtc_state;
10851 int ret, i = -1;
10852
10853 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10854 connector->base.id, connector->name,
10855 encoder->base.id, encoder->name);
10856
10857 old->restore_state = NULL;
10858
10859 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10860
10861 /*
10862 * Algorithm gets a little messy:
10863 *
10864 * - if the connector already has an assigned crtc, use it (but make
10865 * sure it's on first)
10866 *
10867 * - try to find the first unused crtc that can drive this connector,
10868 * and use that if we find one
10869 */
10870
10871 /* See if we already have a CRTC for this connector */
10872 if (connector->state->crtc) {
10873 crtc = connector->state->crtc;
10874
10875 ret = drm_modeset_lock(&crtc->mutex, ctx);
10876 if (ret)
10877 goto fail;
10878
10879 /* Make sure the crtc and connector are running */
10880 goto found;
10881 }
10882
10883 /* Find an unused one (if possible) */
10884 for_each_crtc(dev, possible_crtc) {
10885 i++;
10886 if (!(encoder->possible_crtcs & (1 << i)))
10887 continue;
10888
10889 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10890 if (ret)
10891 goto fail;
10892
10893 if (possible_crtc->state->enable) {
10894 drm_modeset_unlock(&possible_crtc->mutex);
10895 continue;
10896 }
10897
10898 crtc = possible_crtc;
10899 break;
10900 }
10901
10902 /*
10903 * If we didn't find an unused CRTC, don't use any.
10904 */
10905 if (!crtc) {
10906 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10907 ret = -ENODEV;
10908 goto fail;
10909 }
10910
10911 found:
10912 intel_crtc = to_intel_crtc(crtc);
10913
10914 state = drm_atomic_state_alloc(dev);
10915 restore_state = drm_atomic_state_alloc(dev);
10916 if (!state || !restore_state) {
10917 ret = -ENOMEM;
10918 goto fail;
10919 }
10920
10921 state->acquire_ctx = ctx;
10922 restore_state->acquire_ctx = ctx;
10923
10924 connector_state = drm_atomic_get_connector_state(state, connector);
10925 if (IS_ERR(connector_state)) {
10926 ret = PTR_ERR(connector_state);
10927 goto fail;
10928 }
10929
10930 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10931 if (ret)
10932 goto fail;
10933
10934 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10935 if (IS_ERR(crtc_state)) {
10936 ret = PTR_ERR(crtc_state);
10937 goto fail;
10938 }
10939
10940 crtc_state->base.active = crtc_state->base.enable = true;
10941
10942 if (!mode)
10943 mode = &load_detect_mode;
10944
10945 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10946 if (ret)
10947 goto fail;
10948
10949 ret = intel_modeset_disable_planes(state, crtc);
10950 if (ret)
10951 goto fail;
10952
10953 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10954 if (!ret)
10955 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10956 if (!ret)
10957 ret = drm_atomic_add_affected_planes(restore_state, crtc);
10958 if (ret) {
10959 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10960 goto fail;
10961 }
10962
10963 ret = drm_atomic_commit(state);
10964 if (ret) {
10965 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10966 goto fail;
10967 }
10968
10969 old->restore_state = restore_state;
10970 drm_atomic_state_put(state);
10971
10972 /* let the connector get through one full cycle before testing */
10973 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
10974 return true;
10975
10976 fail:
10977 if (state) {
10978 drm_atomic_state_put(state);
10979 state = NULL;
10980 }
10981 if (restore_state) {
10982 drm_atomic_state_put(restore_state);
10983 restore_state = NULL;
10984 }
10985
10986 if (ret == -EDEADLK)
10987 return ret;
10988
10989 return false;
10990 }
10991
10992 void intel_release_load_detect_pipe(struct drm_connector *connector,
10993 struct intel_load_detect_pipe *old,
10994 struct drm_modeset_acquire_ctx *ctx)
10995 {
10996 struct intel_encoder *intel_encoder =
10997 intel_attached_encoder(connector);
10998 struct drm_encoder *encoder = &intel_encoder->base;
10999 struct drm_atomic_state *state = old->restore_state;
11000 int ret;
11001
11002 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11003 connector->base.id, connector->name,
11004 encoder->base.id, encoder->name);
11005
11006 if (!state)
11007 return;
11008
11009 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11010 if (ret)
11011 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11012 drm_atomic_state_put(state);
11013 }
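
/*
 * Typical usage of the load-detect pair above, as an illustrative
 * sketch modeled on a connector ->detect() hook (locking and error
 * handling trimmed; do_load_detect() is a hypothetical helper):
 *
 *	struct intel_load_detect_pipe tmp;
 *	int ret;
 *
 *	ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
 *	if (ret > 0) {
 *		status = do_load_detect(connector);
 *		intel_release_load_detect_pipe(connector, &tmp, ctx);
 *	} else if (ret == 0) {
 *		status = connector_status_unknown;
 *	} else {
 *		return ret; (-EDEADLK: the caller backs off and retries)
 *	}
 */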
11014
11015 static int i9xx_pll_refclk(struct drm_device *dev,
11016 const struct intel_crtc_state *pipe_config)
11017 {
11018 struct drm_i915_private *dev_priv = to_i915(dev);
11019 u32 dpll = pipe_config->dpll_hw_state.dpll;
11020
11021 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11022 return dev_priv->vbt.lvds_ssc_freq;
11023 else if (HAS_PCH_SPLIT(dev_priv))
11024 return 120000;
11025 else if (!IS_GEN(dev_priv, 2))
11026 return 96000;
11027 else
11028 return 48000;
11029 }
11030
11031 /* Returns the clock of the currently programmed mode of the given pipe. */
11032 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11033 struct intel_crtc_state *pipe_config)
11034 {
11035 struct drm_device *dev = crtc->base.dev;
11036 struct drm_i915_private *dev_priv = to_i915(dev);
11037 int pipe = pipe_config->cpu_transcoder;
11038 u32 dpll = pipe_config->dpll_hw_state.dpll;
11039 u32 fp;
11040 struct dpll clock;
11041 int port_clock;
11042 int refclk = i9xx_pll_refclk(dev, pipe_config);
11043
11044 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11045 fp = pipe_config->dpll_hw_state.fp0;
11046 else
11047 fp = pipe_config->dpll_hw_state.fp1;
11048
11049 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11050 if (IS_PINEVIEW(dev_priv)) {
11051 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11052 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11053 } else {
11054 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11055 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11056 }
11057
11058 if (!IS_GEN(dev_priv, 2)) {
11059 if (IS_PINEVIEW(dev_priv))
11060 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11061 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11062 else
11063 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11064 DPLL_FPA01_P1_POST_DIV_SHIFT);
11065
11066 switch (dpll & DPLL_MODE_MASK) {
11067 case DPLLB_MODE_DAC_SERIAL:
11068 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11069 5 : 10;
11070 break;
11071 case DPLLB_MODE_LVDS:
11072 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11073 7 : 14;
11074 break;
11075 default:
11076 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11077 "mode\n", (int)(dpll & DPLL_MODE_MASK));
11078 return;
11079 }
11080
11081 if (IS_PINEVIEW(dev_priv))
11082 port_clock = pnv_calc_dpll_params(refclk, &clock);
11083 else
11084 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11085 } else {
11086 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11087 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11088
11089 if (is_lvds) {
11090 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11091 DPLL_FPA01_P1_POST_DIV_SHIFT);
11092
11093 if (lvds & LVDS_CLKB_POWER_UP)
11094 clock.p2 = 7;
11095 else
11096 clock.p2 = 14;
11097 } else {
11098 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11099 clock.p1 = 2;
11100 else {
11101 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11102 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11103 }
11104 if (dpll & PLL_P2_DIVIDE_BY_4)
11105 clock.p2 = 4;
11106 else
11107 clock.p2 = 2;
11108 }
11109
11110 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11111 }
11112
11113 /*
11114 * This value includes pixel_multiplier. We will use
11115 * port_clock to compute adjusted_mode.crtc_clock in the
11116 * encoder's get_config() function.
11117 */
11118 pipe_config->port_clock = port_clock;
11119 }
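
/*
 * Worked example with illustrative register values, assuming the
 * usual i9xx PLL relationship implemented by i9xx_calc_dpll_params():
 *
 *	m = 5 * (m1 + 2) + (m2 + 2)
 *	vco = refclk * m / (n + 2)
 *	dot = vco / (p1 * p2)
 *
 * With refclk = 96000 kHz, m1 = 14, m2 = 6, n = 4, p1 = 2, p2 = 5:
 *	m = 5 * 16 + 8 = 88
 *	vco = 96000 * 88 / 6 = 1408000 kHz
 *	dot = 1408000 / 10 = 140800 kHz, i.e. port_clock ~= 140.8 MHz
 */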
11120
11121 int intel_dotclock_calculate(int link_freq,
11122 const struct intel_link_m_n *m_n)
11123 {
11124 /*
11125 * The calculation for the data clock (using the data M/N) is:
11126 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11127 * But we want to avoid losing precision if possible, so:
11128 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11129 *
11130 * and using the link M/N it is simpler:
11131 * pixel_clock = (link_m * link_clock) / link_n
11132 */
11133
11134 if (!m_n->link_n)
11135 return 0;
11136
11137 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11138 }
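
/*
 * Illustrative example (made-up M/N values): with link_freq = 270000 kHz
 * (DP HBR) and link_m/link_n = 22222/32768:
 *
 *	pixel_clock = 22222 * 270000 / 32768 = 183103 kHz (truncating)
 */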
11139
11140 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11141 struct intel_crtc_state *pipe_config)
11142 {
11143 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11144
11145 /* read out port_clock from the DPLL */
11146 i9xx_crtc_clock_get(crtc, pipe_config);
11147
11148 /*
11149 * In case there is an active pipe without active ports,
11150 * we may need some idea for the dotclock anyway.
11151 * Calculate one based on the FDI configuration.
11152 */
11153 pipe_config->base.adjusted_mode.crtc_clock =
11154 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11155 &pipe_config->fdi_m_n);
11156 }
11157
11158 /* Returns the currently programmed mode of the given encoder. */
11159 struct drm_display_mode *
11160 intel_encoder_current_mode(struct intel_encoder *encoder)
11161 {
11162 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11163 struct intel_crtc_state *crtc_state;
11164 struct drm_display_mode *mode;
11165 struct intel_crtc *crtc;
11166 enum pipe pipe;
11167
11168 if (!encoder->get_hw_state(encoder, &pipe))
11169 return NULL;
11170
11171 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11172
11173 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11174 if (!mode)
11175 return NULL;
11176
11177 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11178 if (!crtc_state) {
11179 kfree(mode);
11180 return NULL;
11181 }
11182
11183 crtc_state->base.crtc = &crtc->base;
11184
11185 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11186 kfree(crtc_state);
11187 kfree(mode);
11188 return NULL;
11189 }
11190
11191 encoder->get_config(encoder, crtc_state);
11192
11193 intel_mode_from_pipe_config(mode, crtc_state);
11194
11195 kfree(crtc_state);
11196
11197 return mode;
11198 }
11199
11200 static void intel_crtc_destroy(struct drm_crtc *crtc)
11201 {
11202 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11203
11204 drm_crtc_cleanup(crtc);
11205 kfree(intel_crtc);
11206 }
11207
11208 /**
11209 * intel_wm_need_update - Check whether watermarks need updating
11210 * @cur: current plane state
11211 * @new: new plane state
11212 *
11213 * Check current plane state versus the new one to determine whether
11214 * watermarks need to be recalculated.
11215 *
11216 * Returns true if the watermarks need to be recalculated, false otherwise.
11217 */
11218 static bool intel_wm_need_update(struct intel_plane_state *cur,
11219 struct intel_plane_state *new)
11220 {
11221 /* Update watermarks on tiling or size changes. */
11222 if (new->base.visible != cur->base.visible)
11223 return true;
11224
11225 if (!cur->base.fb || !new->base.fb)
11226 return false;
11227
11228 if (cur->base.fb->modifier != new->base.fb->modifier ||
11229 cur->base.rotation != new->base.rotation ||
11230 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11231 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11232 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11233 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11234 return true;
11235
11236 return false;
11237 }
11238
11239 static bool needs_scaling(const struct intel_plane_state *state)
11240 {
11241 int src_w = drm_rect_width(&state->base.src) >> 16;
11242 int src_h = drm_rect_height(&state->base.src) >> 16;
11243 int dst_w = drm_rect_width(&state->base.dst);
11244 int dst_h = drm_rect_height(&state->base.dst);
11245
11246 return (src_w != dst_w || src_h != dst_h);
11247 }
11248
11249 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11250 struct drm_crtc_state *crtc_state,
11251 const struct intel_plane_state *old_plane_state,
11252 struct drm_plane_state *plane_state)
11253 {
11254 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11255 struct drm_crtc *crtc = crtc_state->crtc;
11256 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11257 struct intel_plane *plane = to_intel_plane(plane_state->plane);
11258 struct drm_device *dev = crtc->dev;
11259 struct drm_i915_private *dev_priv = to_i915(dev);
11260 bool mode_changed = needs_modeset(crtc_state);
11261 bool was_crtc_enabled = old_crtc_state->base.active;
11262 bool is_crtc_enabled = crtc_state->active;
11263 bool turn_off, turn_on, visible, was_visible;
11264 struct drm_framebuffer *fb = plane_state->fb;
11265 int ret;
11266
11267 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11268 ret = skl_update_scaler_plane(
11269 to_intel_crtc_state(crtc_state),
11270 to_intel_plane_state(plane_state));
11271 if (ret)
11272 return ret;
11273 }
11274
11275 was_visible = old_plane_state->base.visible;
11276 visible = plane_state->visible;
11277
11278 if (!was_crtc_enabled && WARN_ON(was_visible))
11279 was_visible = false;
11280
11281 /*
11282 * Visibility is calculated as if the crtc was on, but
11283 * after scaler setup everything depends on it being off
11284 * when the crtc isn't active.
11285 *
11286 * FIXME this is wrong for watermarks. Watermarks should also
11287 * be computed as if the pipe would be active. Perhaps move
11288 * per-plane wm computation to the .check_plane() hook, and
11289 * only combine the results from all planes in the current place?
11290 */
11291 if (!is_crtc_enabled) {
11292 plane_state->visible = visible = false;
11293 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
11294 to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
11295 }
11296
11297 if (!was_visible && !visible)
11298 return 0;
11299
11300 if (fb != old_plane_state->base.fb)
11301 pipe_config->fb_changed = true;
11302
11303 turn_off = was_visible && (!visible || mode_changed);
11304 turn_on = visible && (!was_visible || mode_changed);
11305
11306 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11307 intel_crtc->base.base.id, intel_crtc->base.name,
11308 plane->base.base.id, plane->base.name,
11309 fb ? fb->base.id : -1);
11310
11311 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11312 plane->base.base.id, plane->base.name,
11313 was_visible, visible,
11314 turn_off, turn_on, mode_changed);
11315
11316 if (turn_on) {
11317 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11318 pipe_config->update_wm_pre = true;
11319
11320 /* must disable cxsr around plane enable/disable */
11321 if (plane->id != PLANE_CURSOR)
11322 pipe_config->disable_cxsr = true;
11323 } else if (turn_off) {
11324 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11325 pipe_config->update_wm_post = true;
11326
11327 /* must disable cxsr around plane enable/disable */
11328 if (plane->id != PLANE_CURSOR)
11329 pipe_config->disable_cxsr = true;
11330 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11331 to_intel_plane_state(plane_state))) {
11332 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11333 /* FIXME bollocks */
11334 pipe_config->update_wm_pre = true;
11335 pipe_config->update_wm_post = true;
11336 }
11337 }
11338
11339 if (visible || was_visible)
11340 pipe_config->fb_bits |= plane->frontbuffer_bit;
11341
11342 /*
11343 * ILK/SNB DVSACNTR/Sprite Enable
11344 * IVB SPR_CTL/Sprite Enable
11345 * "When in Self Refresh Big FIFO mode, a write to enable the
11346 * plane will be internally buffered and delayed while Big FIFO
11347 * mode is exiting."
11348 *
11349 * Which means that enabling the sprite can take an extra frame
11350 * when we start in big FIFO mode (LP1+). Thus we need to drop
11351 * down to LP0 and wait for vblank in order to make sure the
11352 * sprite gets enabled on the next vblank after the register write.
11353 * Doing otherwise would risk enabling the sprite one frame after
11354 * we've already signalled flip completion. We can resume LP1+
11355 * once the sprite has been enabled.
11356 *
11358 * WaCxSRDisabledForSpriteScaling:ivb
11359 * IVB SPR_SCALE/Scaling Enable
11360 * "Low Power watermarks must be disabled for at least one
11361 * frame before enabling sprite scaling, and kept disabled
11362 * until sprite scaling is disabled."
11363 *
11364 * ILK/SNB DVSASCALE/Scaling Enable
11365 * "When in Self Refresh Big FIFO mode, scaling enable will be
11366 * masked off while Big FIFO mode is exiting."
11367 *
11368 * Despite the w/a only being listed for IVB we assume that
11369 * the ILK/SNB note has similar ramifications, hence we apply
11370 * the w/a on all three platforms.
11371 *
11372 * Experimental results suggest this is needed also for the primary
11373 * plane, not only the sprite plane.
11374 */
11375 if (plane->id != PLANE_CURSOR &&
11376 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11377 IS_IVYBRIDGE(dev_priv)) &&
11378 (turn_on || (!needs_scaling(old_plane_state) &&
11379 needs_scaling(to_intel_plane_state(plane_state)))))
11380 pipe_config->disable_lp_wm = true;
11381
11382 return 0;
11383 }
11384
11385 static bool encoders_cloneable(const struct intel_encoder *a,
11386 const struct intel_encoder *b)
11387 {
11388 /* masks could be asymmetric, so check both ways */
11389 return a == b || (a->cloneable & (1 << b->type) &&
11390 b->cloneable & (1 << a->type));
11391 }
11392
11393 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11394 struct intel_crtc *crtc,
11395 struct intel_encoder *encoder)
11396 {
11397 struct intel_encoder *source_encoder;
11398 struct drm_connector *connector;
11399 struct drm_connector_state *connector_state;
11400 int i;
11401
11402 for_each_new_connector_in_state(state, connector, connector_state, i) {
11403 if (connector_state->crtc != &crtc->base)
11404 continue;
11405
11406 source_encoder =
11407 to_intel_encoder(connector_state->best_encoder);
11408 if (!encoders_cloneable(encoder, source_encoder))
11409 return false;
11410 }
11411
11412 return true;
11413 }
11414
11415 static int icl_add_linked_planes(struct intel_atomic_state *state)
11416 {
11417 struct intel_plane *plane, *linked;
11418 struct intel_plane_state *plane_state, *linked_plane_state;
11419 int i;
11420
11421 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11422 linked = plane_state->linked_plane;
11423
11424 if (!linked)
11425 continue;
11426
11427 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11428 if (IS_ERR(linked_plane_state))
11429 return PTR_ERR(linked_plane_state);
11430
11431 WARN_ON(linked_plane_state->linked_plane != plane);
11432 WARN_ON(linked_plane_state->slave == plane_state->slave);
11433 }
11434
11435 return 0;
11436 }
11437
11438 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11439 {
11440 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11441 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11442 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11443 struct intel_plane *plane, *linked;
11444 struct intel_plane_state *plane_state;
11445 int i;
11446
11447 if (INTEL_GEN(dev_priv) < 11)
11448 return 0;
11449
11450 /*
11451 * Destroy all old plane links and make the slave plane invisible
11452 * in the crtc_state->active_planes mask.
11453 */
11454 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11455 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11456 continue;
11457
11458 plane_state->linked_plane = NULL;
11459 if (plane_state->slave && !plane_state->base.visible) {
11460 crtc_state->active_planes &= ~BIT(plane->id);
11461 crtc_state->update_planes |= BIT(plane->id);
11462 }
11463
11464 plane_state->slave = false;
11465 }
11466
11467 if (!crtc_state->nv12_planes)
11468 return 0;
11469
11470 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11471 struct intel_plane_state *linked_state = NULL;
11472
11473 if (plane->pipe != crtc->pipe ||
11474 !(crtc_state->nv12_planes & BIT(plane->id)))
11475 continue;
11476
11477 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11478 if (!icl_is_nv12_y_plane(linked->id))
11479 continue;
11480
11481 if (crtc_state->active_planes & BIT(linked->id))
11482 continue;
11483
11484 linked_state = intel_atomic_get_plane_state(state, linked);
11485 if (IS_ERR(linked_state))
11486 return PTR_ERR(linked_state);
11487
11488 break;
11489 }
11490
11491 if (!linked_state) {
11492 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11493 hweight8(crtc_state->nv12_planes));
11494
11495 return -EINVAL;
11496 }
11497
11498 plane_state->linked_plane = linked;
11499
11500 linked_state->slave = true;
11501 linked_state->linked_plane = plane;
11502 crtc_state->active_planes |= BIT(linked->id);
11503 crtc_state->update_planes |= BIT(linked->id);
11504 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11505 }
11506
11507 return 0;
11508 }
11509
11510 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11511 {
11512 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11513 struct intel_atomic_state *state =
11514 to_intel_atomic_state(new_crtc_state->base.state);
11515 const struct intel_crtc_state *old_crtc_state =
11516 intel_atomic_get_old_crtc_state(state, crtc);
11517
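/* !x turns the plane bitmask into a bool: detect empty <-> non-empty transitions */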
11518 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11519 }
11520
11521 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11522 struct drm_crtc_state *crtc_state)
11523 {
11524 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11525 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11526 struct intel_crtc_state *pipe_config =
11527 to_intel_crtc_state(crtc_state);
11528 int ret;
11529 bool mode_changed = needs_modeset(crtc_state);
11530
11531 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11532 mode_changed && !crtc_state->active)
11533 pipe_config->update_wm_post = true;
11534
11535 if (mode_changed && crtc_state->enable &&
11536 dev_priv->display.crtc_compute_clock &&
11537 !WARN_ON(pipe_config->shared_dpll)) {
11538 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11539 pipe_config);
11540 if (ret)
11541 return ret;
11542 }
11543
11544 /*
11545 * May need to update pipe gamma enable bits
11546 * when C8 planes are getting enabled/disabled.
11547 */
11548 if (c8_planes_changed(pipe_config))
11549 crtc_state->color_mgmt_changed = true;
11550
11551 if (mode_changed || pipe_config->update_pipe ||
11552 crtc_state->color_mgmt_changed) {
11553 ret = intel_color_check(pipe_config);
11554 if (ret)
11555 return ret;
11556 }
11557
11558 ret = 0;
11559 if (dev_priv->display.compute_pipe_wm) {
11560 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11561 if (ret) {
11562 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11563 return ret;
11564 }
11565 }
11566
11567 if (dev_priv->display.compute_intermediate_wm) {
11568 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11569 return 0;
11570
11571 /*
11572 * Calculate 'intermediate' watermarks that satisfy both the
11573 * old state and the new state. We can program these
11574 * immediately.
11575 */
11576 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11577 if (ret) {
11578 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11579 return ret;
11580 }
11581 }
11582
11583 if (INTEL_GEN(dev_priv) >= 9) {
11584 if (mode_changed || pipe_config->update_pipe)
11585 ret = skl_update_scaler_crtc(pipe_config);
11586
11587 if (!ret)
11588 ret = icl_check_nv12_planes(pipe_config);
11589 if (!ret)
11590 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11591 pipe_config);
11592 if (!ret)
11593 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11594 pipe_config);
11595 }
11596
11597 if (HAS_IPS(dev_priv))
11598 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11599
11600 return ret;
11601 }
11602
11603 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11604 .atomic_check = intel_crtc_atomic_check,
11605 };
11606
11607 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11608 {
11609 struct intel_connector *connector;
11610 struct drm_connector_list_iter conn_iter;
11611
11612 drm_connector_list_iter_begin(dev, &conn_iter);
11613 for_each_intel_connector_iter(connector, &conn_iter) {
11614 if (connector->base.state->crtc)
11615 drm_connector_put(&connector->base);
11616
11617 if (connector->base.encoder) {
11618 connector->base.state->best_encoder =
11619 connector->base.encoder;
11620 connector->base.state->crtc =
11621 connector->base.encoder->crtc;
11622
11623 drm_connector_get(&connector->base);
11624 } else {
11625 connector->base.state->best_encoder = NULL;
11626 connector->base.state->crtc = NULL;
11627 }
11628 }
11629 drm_connector_list_iter_end(&conn_iter);
11630 }
11631
11632 static int
11633 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11634 struct intel_crtc_state *pipe_config)
11635 {
11636 struct drm_connector *connector = conn_state->connector;
11637 const struct drm_display_info *info = &connector->display_info;
11638 int bpp;
11639
11640 switch (conn_state->max_bpc) {
11641 case 6 ... 7:
11642 bpp = 6 * 3;
11643 break;
11644 case 8 ... 9:
11645 bpp = 8 * 3;
11646 break;
11647 case 10 ... 11:
11648 bpp = 10 * 3;
11649 break;
11650 case 12:
11651 bpp = 12 * 3;
11652 break;
11653 default:
11654 return -EINVAL;
11655 }
11656
11657 if (bpp < pipe_config->pipe_bpp) {
11658 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11659 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11660 connector->base.id, connector->name,
11661 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11662 pipe_config->pipe_bpp);
11663
11664 pipe_config->pipe_bpp = bpp;
11665 }
11666
11667 return 0;
11668 }
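
/*
 * For example, a connector "max bpc" property of 10 would clamp a
 * platform maximum of 12*3 = 36 pipe_bpp down to 10*3 = 30.
 */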
11669
11670 static int
11671 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11672 struct intel_crtc_state *pipe_config)
11673 {
11674 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11675 struct drm_atomic_state *state = pipe_config->base.state;
11676 struct drm_connector *connector;
11677 struct drm_connector_state *connector_state;
11678 int bpp, i;
11679
11680 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11681 IS_CHERRYVIEW(dev_priv)))
11682 bpp = 10*3;
11683 else if (INTEL_GEN(dev_priv) >= 5)
11684 bpp = 12*3;
11685 else
11686 bpp = 8*3;
11687
11688 pipe_config->pipe_bpp = bpp;
11689
11690 /* Clamp display bpp to connector max bpp */
11691 for_each_new_connector_in_state(state, connector, connector_state, i) {
11692 int ret;
11693
11694 if (connector_state->crtc != &crtc->base)
11695 continue;
11696
11697 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11698 if (ret)
11699 return ret;
11700 }
11701
11702 return 0;
11703 }
11704
11705 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11706 {
11707 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11708 "type: 0x%x flags: 0x%x\n",
11709 mode->crtc_clock,
11710 mode->crtc_hdisplay, mode->crtc_hsync_start,
11711 mode->crtc_hsync_end, mode->crtc_htotal,
11712 mode->crtc_vdisplay, mode->crtc_vsync_start,
11713 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11714 }
11715
11716 static inline void
11717 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
11718 unsigned int lane_count, struct intel_link_m_n *m_n)
11719 {
11720 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11721 id, lane_count,
11722 m_n->gmch_m, m_n->gmch_n,
11723 m_n->link_m, m_n->link_n, m_n->tu);
11724 }
11725
11726 static void
11727 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11728 const union hdmi_infoframe *frame)
11729 {
11730 if ((drm_debug & DRM_UT_KMS) == 0)
11731 return;
11732
11733 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11734 }
11735
11736 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11737
11738 static const char * const output_type_str[] = {
11739 OUTPUT_TYPE(UNUSED),
11740 OUTPUT_TYPE(ANALOG),
11741 OUTPUT_TYPE(DVO),
11742 OUTPUT_TYPE(SDVO),
11743 OUTPUT_TYPE(LVDS),
11744 OUTPUT_TYPE(TVOUT),
11745 OUTPUT_TYPE(HDMI),
11746 OUTPUT_TYPE(DP),
11747 OUTPUT_TYPE(EDP),
11748 OUTPUT_TYPE(DSI),
11749 OUTPUT_TYPE(DDI),
11750 OUTPUT_TYPE(DP_MST),
11751 };
11752
11753 #undef OUTPUT_TYPE
11754
11755 static void snprintf_output_types(char *buf, size_t len,
11756 unsigned int output_types)
11757 {
11758 char *str = buf;
11759 int i;
11760
11761 str[0] = '\0';
11762
11763 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11764 int r;
11765
11766 if ((output_types & BIT(i)) == 0)
11767 continue;
11768
11769 r = snprintf(str, len, "%s%s",
11770 str != buf ? "," : "", output_type_str[i]);
11771 if (r >= len)
11772 break;
11773 str += r;
11774 len -= r;
11775
11776 output_types &= ~BIT(i);
11777 }
11778
11779 WARN_ON_ONCE(output_types != 0);
11780 }
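
/*
 * For example, an output_types mask with the ANALOG and HDMI bits set
 * is printed as "ANALOG,HDMI".
 */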
11781
11782 static const char * const output_format_str[] = {
11783 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11784 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11785 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11786 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11787 };
11788
11789 static const char *output_formats(enum intel_output_format format)
11790 {
11791 if (format >= ARRAY_SIZE(output_format_str))
11792 format = INTEL_OUTPUT_FORMAT_INVALID;
11793 return output_format_str[format];
11794 }
11795
11796 static void intel_dump_pipe_config(struct intel_crtc *crtc,
11797 struct intel_crtc_state *pipe_config,
11798 const char *context)
11799 {
11800 struct drm_device *dev = crtc->base.dev;
11801 struct drm_i915_private *dev_priv = to_i915(dev);
11802 struct drm_plane *plane;
11803 struct intel_plane *intel_plane;
11804 struct intel_plane_state *state;
11805 struct drm_framebuffer *fb;
11806 char buf[64];
11807
11808 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
11809 crtc->base.base.id, crtc->base.name, context);
11810
11811 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11812 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
11813 buf, pipe_config->output_types);
11814
11815 DRM_DEBUG_KMS("output format: %s\n",
11816 output_formats(pipe_config->output_format));
11817
11818 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11819 transcoder_name(pipe_config->cpu_transcoder),
11820 pipe_config->pipe_bpp, pipe_config->dither);
11821
11822 if (pipe_config->has_pch_encoder)
11823 intel_dump_m_n_config(pipe_config, "fdi",
11824 pipe_config->fdi_lanes,
11825 &pipe_config->fdi_m_n);
11826
11827 if (intel_crtc_has_dp_encoder(pipe_config)) {
11828 intel_dump_m_n_config(pipe_config, "dp m_n",
11829 pipe_config->lane_count, &pipe_config->dp_m_n);
11830 if (pipe_config->has_drrs)
11831 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11832 pipe_config->lane_count,
11833 &pipe_config->dp_m2_n2);
11834 }
11835
11836 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
11837 pipe_config->has_audio, pipe_config->has_infoframe);
11838
11839 DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
11840 pipe_config->infoframes.enable);
11841
11842 if (pipe_config->infoframes.enable &
11843 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11844 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11845 if (pipe_config->infoframes.enable &
11846 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11847 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11848 if (pipe_config->infoframes.enable &
11849 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11850 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11851 if (pipe_config->infoframes.enable &
11852 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11853 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11854
11855 DRM_DEBUG_KMS("requested mode:\n");
11856 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11857 DRM_DEBUG_KMS("adjusted mode:\n");
11858 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11859 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11860 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11861 pipe_config->port_clock,
11862 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11863 pipe_config->pixel_rate);
11864
11865 if (INTEL_GEN(dev_priv) >= 9)
11866 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11867 crtc->num_scalers,
11868 pipe_config->scaler_state.scaler_users,
11869 pipe_config->scaler_state.scaler_id);
11870
11871 if (HAS_GMCH(dev_priv))
11872 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11873 pipe_config->gmch_pfit.control,
11874 pipe_config->gmch_pfit.pgm_ratios,
11875 pipe_config->gmch_pfit.lvds_border_bits);
11876 else
11877 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
11878 pipe_config->pch_pfit.pos,
11879 pipe_config->pch_pfit.size,
11880 enableddisabled(pipe_config->pch_pfit.enabled),
11881 yesno(pipe_config->pch_pfit.force_thru));
11882
11883 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11884 pipe_config->ips_enabled, pipe_config->double_wide);
11885
11886 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11887
11888 DRM_DEBUG_KMS("planes on this crtc\n");
11889 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
11890 struct drm_format_name_buf format_name;
11891 intel_plane = to_intel_plane(plane);
11892 if (intel_plane->pipe != crtc->pipe)
11893 continue;
11894
11895 state = to_intel_plane_state(plane->state);
11896 fb = state->base.fb;
11897 if (!fb) {
11898 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
11899 plane->base.id, plane->name, state->scaler_id);
11900 continue;
11901 }
11902
11903 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
11904 plane->base.id, plane->name,
11905 fb->base.id, fb->width, fb->height,
11906 drm_get_format_name(fb->format->format, &format_name));
11907 if (INTEL_GEN(dev_priv) >= 9)
11908 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
11909 state->scaler_id,
11910 state->base.src.x1 >> 16,
11911 state->base.src.y1 >> 16,
11912 drm_rect_width(&state->base.src) >> 16,
11913 drm_rect_height(&state->base.src) >> 16,
11914 state->base.dst.x1, state->base.dst.y1,
11915 drm_rect_width(&state->base.dst),
11916 drm_rect_height(&state->base.dst));
11917 }
11918 }
11919
11920 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11921 {
11922 struct drm_device *dev = state->dev;
11923 struct drm_connector *connector;
11924 struct drm_connector_list_iter conn_iter;
11925 unsigned int used_ports = 0;
11926 unsigned int used_mst_ports = 0;
11927 bool ret = true;
11928
11929 /*
11930 * Walk the connector list instead of the encoder
11931 * list to detect the problem on ddi platforms
11932 * where there's just one encoder per digital port.
11933 */
11934 drm_connector_list_iter_begin(dev, &conn_iter);
11935 drm_for_each_connector_iter(connector, &conn_iter) {
11936 struct drm_connector_state *connector_state;
11937 struct intel_encoder *encoder;
11938
11939 connector_state = drm_atomic_get_new_connector_state(state, connector);
11940 if (!connector_state)
11941 connector_state = connector->state;
11942
11943 if (!connector_state->best_encoder)
11944 continue;
11945
11946 encoder = to_intel_encoder(connector_state->best_encoder);
11947
11948 WARN_ON(!connector_state->crtc);
11949
11950 switch (encoder->type) {
11951 unsigned int port_mask;
11952 case INTEL_OUTPUT_DDI:
11953 if (WARN_ON(!HAS_DDI(to_i915(dev))))
11954 break;
11955 /* else: fall through */
11956 case INTEL_OUTPUT_DP:
11957 case INTEL_OUTPUT_HDMI:
11958 case INTEL_OUTPUT_EDP:
11959 port_mask = 1 << encoder->port;
11960
11961 /* the same port mustn't appear more than once */
11962 if (used_ports & port_mask)
11963 ret = false;
11964
11965 used_ports |= port_mask;
11966 break;
11967 case INTEL_OUTPUT_DP_MST:
11968 used_mst_ports |=
11969 1 << encoder->port;
11970 break;
11971 default:
11972 break;
11973 }
11974 }
11975 drm_connector_list_iter_end(&conn_iter);
11976
11977 /* can't mix MST and SST/HDMI on the same port */
11978 if (used_ports & used_mst_ports)
11979 return false;
11980
11981 return ret;
11982 }
11983
11984 static int
11985 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11986 {
11987 struct drm_i915_private *dev_priv =
11988 to_i915(crtc_state->base.crtc->dev);
11989 struct intel_crtc_state *saved_state;
11990
11991 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
11992 if (!saved_state)
11993 return -ENOMEM;
11994
11995 /* FIXME: before the switch to atomic started, a new pipe_config was
11996 * kzalloc'd. Code that depends on any field being zero should be
11997 * fixed, so that the crtc_state can be safely duplicated. For now,
11998 * only fields that are known to not cause problems are preserved. */
11999
12000 saved_state->scaler_state = crtc_state->scaler_state;
12001 saved_state->shared_dpll = crtc_state->shared_dpll;
12002 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12003 saved_state->crc_enabled = crtc_state->crc_enabled;
12004 if (IS_G4X(dev_priv) ||
12005 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12006 saved_state->wm = crtc_state->wm;
12007
12008 /* Keep base drm_crtc_state intact, only clear our extended struct */
12009 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
12010 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
12011 sizeof(*crtc_state) - sizeof(crtc_state->base));
12012
12013 kfree(saved_state);
12014 return 0;
12015 }
12016
12017 static int
12018 intel_modeset_pipe_config(struct drm_crtc *crtc,
12019 struct intel_crtc_state *pipe_config)
12020 {
12021 struct drm_atomic_state *state = pipe_config->base.state;
12022 struct intel_encoder *encoder;
12023 struct drm_connector *connector;
12024 struct drm_connector_state *connector_state;
12025 int base_bpp, ret;
12026 int i;
12027 bool retry = true;
12028
12029 ret = clear_intel_crtc_state(pipe_config);
12030 if (ret)
12031 return ret;
12032
12033 pipe_config->cpu_transcoder =
12034 (enum transcoder) to_intel_crtc(crtc)->pipe;
12035
12036 /*
12037 * Sanitize sync polarity flags based on requested ones. If neither
12038 * positive nor negative polarity is requested, treat this as meaning
12039 * negative polarity.
12040 */
12041 if (!(pipe_config->base.adjusted_mode.flags &
12042 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12043 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12044
12045 if (!(pipe_config->base.adjusted_mode.flags &
12046 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12047 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12048
12049 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12050 pipe_config);
12051 if (ret)
12052 return ret;
12053
12054 base_bpp = pipe_config->pipe_bpp;
12055
12056 /*
12057 * Determine the real pipe dimensions. Note that stereo modes can
12058 * increase the actual pipe size due to the frame doubling and
12059 * insertion of additional space for blanks between the frames. This
12060 * is stored in the crtc timings. We use the requested mode to do this
12061 * computation to clearly distinguish it from the adjusted mode, which
12062 * can be changed by the connectors in the below retry loop.
12063 */
12064 drm_mode_get_hv_timing(&pipe_config->base.mode,
12065 &pipe_config->pipe_src_w,
12066 &pipe_config->pipe_src_h);
12067
12068 for_each_new_connector_in_state(state, connector, connector_state, i) {
12069 if (connector_state->crtc != crtc)
12070 continue;
12071
12072 encoder = to_intel_encoder(connector_state->best_encoder);
12073
12074 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12075 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12076 return -EINVAL;
12077 }
12078
12079 /*
12080 * Determine output_types before calling the .compute_config()
12081 * hooks so that the hooks can use this information safely.
12082 */
12083 if (encoder->compute_output_type)
12084 pipe_config->output_types |=
12085 BIT(encoder->compute_output_type(encoder, pipe_config,
12086 connector_state));
12087 else
12088 pipe_config->output_types |= BIT(encoder->type);
12089 }
12090
12091 encoder_retry:
12092 /* Ensure the port clock defaults are reset when retrying. */
12093 pipe_config->port_clock = 0;
12094 pipe_config->pixel_multiplier = 1;
12095
12096 /* Fill in default crtc timings, allow encoders to overwrite them. */
12097 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12098 CRTC_STEREO_DOUBLE);
12099
12100 /* Pass our mode to the connectors and the CRTC to give them a chance to
12101 * adjust it according to limitations or connector properties, and also
12102 * a chance to reject the mode entirely.
12103 */
12104 for_each_new_connector_in_state(state, connector, connector_state, i) {
12105 if (connector_state->crtc != crtc)
12106 continue;
12107
12108 encoder = to_intel_encoder(connector_state->best_encoder);
12109 ret = encoder->compute_config(encoder, pipe_config,
12110 connector_state);
12111 if (ret < 0) {
12112 if (ret != -EDEADLK)
12113 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12114 ret);
12115 return ret;
12116 }
12117 }
12118
12119 /* Set default port clock if not overwritten by the encoder. Needs to be
12120 * done afterwards in case the encoder adjusts the mode. */
12121 if (!pipe_config->port_clock)
12122 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12123 * pipe_config->pixel_multiplier;
12124
12125 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12126 if (ret == -EDEADLK)
12127 return ret;
12128 if (ret < 0) {
12129 DRM_DEBUG_KMS("CRTC fixup failed\n");
12130 return ret;
12131 }
12132
12133 if (ret == RETRY) {
12134 if (WARN(!retry, "loop in pipe configuration computation\n"))
12135 return -EINVAL;
12136
12137 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12138 retry = false;
12139 goto encoder_retry;
12140 }
12141
12142 /* Dithering seems to not pass through bits correctly when it should, so
12143 * only enable it on 6bpc panels and when it's not a compliance
12144 * test requesting a 6bpc video pattern.
12145 */
12146 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12147 !pipe_config->dither_force_disable;
12148 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12149 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12150
12151 return 0;
12152 }
12153
12154 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12155 {
12156 int diff;
12157
12158 if (clock1 == clock2)
12159 return true;
12160
12161 if (!clock1 || !clock2)
12162 return false;
12163
12164 diff = abs(clock1 - clock2);
12165
12166 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12167 return true;
12168
12169 return false;
12170 }
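
/*
 * The check above accepts clocks whose difference is below 5% of their
 * sum (roughly 10% of either clock). E.g. 100000 vs 108000:
 *	(8000 + 208000) * 100 / 208000 = 103 -> match
 * whereas 100000 vs 120000:
 *	(20000 + 220000) * 100 / 220000 = 109 -> mismatch
 */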
12171
12172 static bool
12173 intel_compare_m_n(unsigned int m, unsigned int n,
12174 unsigned int m2, unsigned int n2,
12175 bool exact)
12176 {
12177 if (m == m2 && n == n2)
12178 return true;
12179
12180 if (exact || !m || !n || !m2 || !n2)
12181 return false;
12182
12183 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12184
12185 if (n > n2) {
12186 while (n > n2) {
12187 m2 <<= 1;
12188 n2 <<= 1;
12189 }
12190 } else if (n < n2) {
12191 while (n < n2) {
12192 m <<= 1;
12193 n <<= 1;
12194 }
12195 }
12196
12197 if (n != n2)
12198 return false;
12199
12200 return intel_fuzzy_clock_check(m, m2);
12201 }
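
/*
 * Example of the inexact path above: comparing m/n = 10/5 against
 * m2/n2 = 42/20, n < n2 so m/n is doubled twice to 40/20; the n values
 * now match and intel_fuzzy_clock_check(40, 42) accepts the ~5% delta.
 */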
12202
12203 static bool
12204 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12205 struct intel_link_m_n *m2_n2,
12206 bool adjust)
12207 {
12208 if (m_n->tu == m2_n2->tu &&
12209 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12210 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12211 intel_compare_m_n(m_n->link_m, m_n->link_n,
12212 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12213 if (adjust)
12214 *m2_n2 = *m_n;
12215
12216 return true;
12217 }
12218
12219 return false;
12220 }
12221
12222 static bool
12223 intel_compare_infoframe(const union hdmi_infoframe *a,
12224 const union hdmi_infoframe *b)
12225 {
12226 return memcmp(a, b, sizeof(*a)) == 0;
12227 }
12228
12229 static void
12230 pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
12231 bool adjust, const char *name,
12232 const union hdmi_infoframe *a,
12233 const union hdmi_infoframe *b)
12234 {
12235 if (adjust) {
12236 if ((drm_debug & DRM_UT_KMS) == 0)
12237 return;
12238
12239 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
12240 drm_dbg(DRM_UT_KMS, "expected:");
12241 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12242 drm_dbg(DRM_UT_KMS, "found");
12243 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12244 } else {
12245 drm_err("mismatch in %s infoframe", name);
12246 drm_err("expected:");
12247 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12248 drm_err("found");
12249 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12250 }
12251 }
12252
12253 static void __printf(3, 4)
12254 pipe_config_err(bool adjust, const char *name, const char *format, ...)
12255 {
12256 struct va_format vaf;
12257 va_list args;
12258
12259 va_start(args, format);
12260 vaf.fmt = format;
12261 vaf.va = &args;
12262
12263 if (adjust)
12264 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12265 else
12266 drm_err("mismatch in %s %pV", name, &vaf);
12267
12268 va_end(args);
12269 }
12270
12271 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12272 {
12273 if (i915_modparams.fastboot != -1)
12274 return i915_modparams.fastboot;
12275
12276 /* Enable fastboot by default on Skylake and newer */
12277 if (INTEL_GEN(dev_priv) >= 9)
12278 return true;
12279
12280 /* Enable fastboot by default on VLV and CHV */
12281 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12282 return true;
12283
12284 /* Disabled by default on all others */
12285 return false;
12286 }
12287
12288 static bool
12289 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
12290 struct intel_crtc_state *current_config,
12291 struct intel_crtc_state *pipe_config,
12292 bool adjust)
12293 {
12294 bool ret = true;
12295 bool fixup_inherited = adjust &&
12296 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12297 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12298
12299 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12300 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12301 ret = false;
12302 }
12303
12304 #define PIPE_CONF_CHECK_X(name) do { \
12305 if (current_config->name != pipe_config->name) { \
12306 pipe_config_err(adjust, __stringify(name), \
12307 "(expected 0x%08x, found 0x%08x)\n", \
12308 current_config->name, \
12309 pipe_config->name); \
12310 ret = false; \
12311 } \
12312 } while (0)
12313
12314 #define PIPE_CONF_CHECK_I(name) do { \
12315 if (current_config->name != pipe_config->name) { \
12316 pipe_config_err(adjust, __stringify(name), \
12317 "(expected %i, found %i)\n", \
12318 current_config->name, \
12319 pipe_config->name); \
12320 ret = false; \
12321 } \
12322 } while (0)
12323
12324 #define PIPE_CONF_CHECK_BOOL(name) do { \
12325 if (current_config->name != pipe_config->name) { \
12326 pipe_config_err(adjust, __stringify(name), \
12327 "(expected %s, found %s)\n", \
12328 yesno(current_config->name), \
12329 yesno(pipe_config->name)); \
12330 ret = false; \
12331 } \
12332 } while (0)
12333
12334 /*
12335 * Checks state where we only read out the enabling, but not the entire
12336 * state itself (like full infoframes or ELD for audio). These states
12337 * require a full modeset on bootup to fix up.
12338 */
12339 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12340 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12341 PIPE_CONF_CHECK_BOOL(name); \
12342 } else { \
12343 pipe_config_err(adjust, __stringify(name), \
12344 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12345 yesno(current_config->name), \
12346 yesno(pipe_config->name)); \
12347 ret = false; \
12348 } \
12349 } while (0)
12350
12351 #define PIPE_CONF_CHECK_P(name) do { \
12352 if (current_config->name != pipe_config->name) { \
12353 pipe_config_err(adjust, __stringify(name), \
12354 "(expected %p, found %p)\n", \
12355 current_config->name, \
12356 pipe_config->name); \
12357 ret = false; \
12358 } \
12359 } while (0)
12360
12361 #define PIPE_CONF_CHECK_M_N(name) do { \
12362 if (!intel_compare_link_m_n(&current_config->name, \
12363 &pipe_config->name,\
12364 adjust)) { \
12365 pipe_config_err(adjust, __stringify(name), \
12366 "(expected tu %i gmch %i/%i link %i/%i, " \
12367 "found tu %i, gmch %i/%i link %i/%i)\n", \
12368 current_config->name.tu, \
12369 current_config->name.gmch_m, \
12370 current_config->name.gmch_n, \
12371 current_config->name.link_m, \
12372 current_config->name.link_n, \
12373 pipe_config->name.tu, \
12374 pipe_config->name.gmch_m, \
12375 pipe_config->name.gmch_n, \
12376 pipe_config->name.link_m, \
12377 pipe_config->name.link_n); \
12378 ret = false; \
12379 } \
12380 } while (0)
12381
12382 /* This is required for BDW+ where there is only one set of registers for
12383 * switching between high and low RR.
12384 * This macro can be used whenever a comparison has to be made between one
12385 * hw state and multiple sw state variables.
12386 */
12387 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12388 if (!intel_compare_link_m_n(&current_config->name, \
12389 &pipe_config->name, adjust) && \
12390 !intel_compare_link_m_n(&current_config->alt_name, \
12391 &pipe_config->name, adjust)) { \
12392 pipe_config_err(adjust, __stringify(name), \
12393 "(expected tu %i gmch %i/%i link %i/%i, " \
12394 "or tu %i gmch %i/%i link %i/%i, " \
12395 "found tu %i, gmch %i/%i link %i/%i)\n", \
12396 current_config->name.tu, \
12397 current_config->name.gmch_m, \
12398 current_config->name.gmch_n, \
12399 current_config->name.link_m, \
12400 current_config->name.link_n, \
12401 current_config->alt_name.tu, \
12402 current_config->alt_name.gmch_m, \
12403 current_config->alt_name.gmch_n, \
12404 current_config->alt_name.link_m, \
12405 current_config->alt_name.link_n, \
12406 pipe_config->name.tu, \
12407 pipe_config->name.gmch_m, \
12408 pipe_config->name.gmch_n, \
12409 pipe_config->name.link_m, \
12410 pipe_config->name.link_n); \
12411 ret = false; \
12412 } \
12413 } while (0)
12414
12415 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12416 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12417 pipe_config_err(adjust, __stringify(name), \
12418 "(%x) (expected %i, found %i)\n", \
12419 (mask), \
12420 current_config->name & (mask), \
12421 pipe_config->name & (mask)); \
12422 ret = false; \
12423 } \
12424 } while (0)
12425
12426 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12427 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12428 pipe_config_err(adjust, __stringify(name), \
12429 "(expected %i, found %i)\n", \
12430 current_config->name, \
12431 pipe_config->name); \
12432 ret = false; \
12433 } \
12434 } while (0)
12435
12436 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12437 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12438 &pipe_config->infoframes.name)) { \
12439 pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
12440 &current_config->infoframes.name, \
12441 &pipe_config->infoframes.name); \
12442 ret = false; \
12443 } \
12444 } while (0)
12445
12446 #define PIPE_CONF_QUIRK(quirk) \
12447 ((current_config->quirks | pipe_config->quirks) & (quirk))
12448
12449 PIPE_CONF_CHECK_I(cpu_transcoder);
12450
12451 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12452 PIPE_CONF_CHECK_I(fdi_lanes);
12453 PIPE_CONF_CHECK_M_N(fdi_m_n);
12454
12455 PIPE_CONF_CHECK_I(lane_count);
12456 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12457
12458 if (INTEL_GEN(dev_priv) < 8) {
12459 PIPE_CONF_CHECK_M_N(dp_m_n);
12460
12461 if (current_config->has_drrs)
12462 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12463 } else
12464 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12465
12466 PIPE_CONF_CHECK_X(output_types);
12467
12468 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12469 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12470 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12471 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12472 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12473 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12474
12475 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12476 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12477 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12478 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12479 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12480 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12481
12482 PIPE_CONF_CHECK_I(pixel_multiplier);
12483 PIPE_CONF_CHECK_I(output_format);
12484 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12485 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12486 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12487 PIPE_CONF_CHECK_BOOL(limited_color_range);
12488
12489 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12490 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12491 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12492
12493 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12494
12495 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12496 DRM_MODE_FLAG_INTERLACE);
12497
12498 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12499 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12500 DRM_MODE_FLAG_PHSYNC);
12501 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12502 DRM_MODE_FLAG_NHSYNC);
12503 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12504 DRM_MODE_FLAG_PVSYNC);
12505 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12506 DRM_MODE_FLAG_NVSYNC);
12507 }
12508
12509 PIPE_CONF_CHECK_X(gmch_pfit.control);
12510 /* pfit ratios are autocomputed by the hw on gen4+ */
12511 if (INTEL_GEN(dev_priv) < 4)
12512 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12513 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12514
12515 /*
12516 * Changing the EDP transcoder input mux
12517 * (A_ONOFF vs. A_ON) requires a full modeset.
12518 */
12519 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
12520
12521 if (!adjust) {
12522 PIPE_CONF_CHECK_I(pipe_src_w);
12523 PIPE_CONF_CHECK_I(pipe_src_h);
12524
12525 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12526 if (current_config->pch_pfit.enabled) {
12527 PIPE_CONF_CHECK_X(pch_pfit.pos);
12528 PIPE_CONF_CHECK_X(pch_pfit.size);
12529 }
12530
12531 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12532 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12533
12534 PIPE_CONF_CHECK_X(gamma_mode);
12535 if (IS_CHERRYVIEW(dev_priv))
12536 PIPE_CONF_CHECK_X(cgm_mode);
12537 else
12538 PIPE_CONF_CHECK_X(csc_mode);
12539 PIPE_CONF_CHECK_BOOL(gamma_enable);
12540 PIPE_CONF_CHECK_BOOL(csc_enable);
12541 }
12542
12543 PIPE_CONF_CHECK_BOOL(double_wide);
12544
12545 PIPE_CONF_CHECK_P(shared_dpll);
12546 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12547 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12548 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12549 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12550 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12551 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12552 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12553 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12554 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12555 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12556 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12557 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12558 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12559 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12560 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12561 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12562 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12563 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12564 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12565 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12566 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12567 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12568 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12569 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12570 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12571 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12572 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12573 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12574 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12575 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12576 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12577
12578 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12579 PIPE_CONF_CHECK_X(dsi_pll.div);
12580
12581 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12582 PIPE_CONF_CHECK_I(pipe_bpp);
12583
12584 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12585 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12586
12587 PIPE_CONF_CHECK_I(min_voltage_level);
12588
12589 PIPE_CONF_CHECK_X(infoframes.enable);
12590 PIPE_CONF_CHECK_X(infoframes.gcp);
12591 PIPE_CONF_CHECK_INFOFRAME(avi);
12592 PIPE_CONF_CHECK_INFOFRAME(spd);
12593 PIPE_CONF_CHECK_INFOFRAME(hdmi);
12594 PIPE_CONF_CHECK_INFOFRAME(drm);
12595
12596 #undef PIPE_CONF_CHECK_X
12597 #undef PIPE_CONF_CHECK_I
12598 #undef PIPE_CONF_CHECK_BOOL
12599 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12600 #undef PIPE_CONF_CHECK_P
12601 #undef PIPE_CONF_CHECK_FLAGS
12602 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12603 #undef PIPE_CONF_QUIRK
12604
12605 return ret;
12606 }
12607
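/*
 * Two callers, two modes (a reading aid, not new behaviour): the atomic
 * check phase calls intel_pipe_config_compare() with adjust = true to
 * decide whether a fastset may replace a full modeset, while
 * verify_crtc_state() calls it with adjust = false to check that the
 * committed sw state matches what the hw reports back.
 */
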
12608 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12609 const struct intel_crtc_state *pipe_config)
12610 {
12611 if (pipe_config->has_pch_encoder) {
12612 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12613 &pipe_config->fdi_m_n);
12614 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12615
12616 /*
12617 * FDI already provided one idea for the dotclock.
12618 * Yell if the encoder disagrees.
12619 */
12620 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12621 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12622 fdi_dotclock, dotclock);
12623 }
12624 }
12625
12626 static void verify_wm_state(struct drm_crtc *crtc,
12627 struct drm_crtc_state *new_state)
12628 {
12629 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12630 struct skl_hw_state {
12631 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12632 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12633 struct skl_ddb_allocation ddb;
12634 struct skl_pipe_wm wm;
12635 } *hw;
12636 struct skl_ddb_allocation *sw_ddb;
12637 struct skl_pipe_wm *sw_wm;
12638 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12639 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12640 const enum pipe pipe = intel_crtc->pipe;
12641 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12642
12643 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12644 return;
12645
12646 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12647 if (!hw)
12648 return;
12649
12650 skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
12651 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12652
12653 skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
12654
12655 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12656 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12657
12658 if (INTEL_GEN(dev_priv) >= 11 &&
12659 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12660 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12661 sw_ddb->enabled_slices,
12662 hw->ddb.enabled_slices);
12663
12664 /* planes */
12665 for_each_universal_plane(dev_priv, pipe, plane) {
12666 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12667
12668 hw_plane_wm = &hw->wm.planes[plane];
12669 sw_plane_wm = &sw_wm->planes[plane];
12670
12671 /* Watermarks */
12672 for (level = 0; level <= max_level; level++) {
12673 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12674 &sw_plane_wm->wm[level]))
12675 continue;
12676
12677 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12678 pipe_name(pipe), plane + 1, level,
12679 sw_plane_wm->wm[level].plane_en,
12680 sw_plane_wm->wm[level].plane_res_b,
12681 sw_plane_wm->wm[level].plane_res_l,
12682 hw_plane_wm->wm[level].plane_en,
12683 hw_plane_wm->wm[level].plane_res_b,
12684 hw_plane_wm->wm[level].plane_res_l);
12685 }
12686
12687 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12688 &sw_plane_wm->trans_wm)) {
12689 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12690 pipe_name(pipe), plane + 1,
12691 sw_plane_wm->trans_wm.plane_en,
12692 sw_plane_wm->trans_wm.plane_res_b,
12693 sw_plane_wm->trans_wm.plane_res_l,
12694 hw_plane_wm->trans_wm.plane_en,
12695 hw_plane_wm->trans_wm.plane_res_b,
12696 hw_plane_wm->trans_wm.plane_res_l);
12697 }
12698
12699 /* DDB */
12700 hw_ddb_entry = &hw->ddb_y[plane];
12701 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12702
12703 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12704 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12705 pipe_name(pipe), plane + 1,
12706 sw_ddb_entry->start, sw_ddb_entry->end,
12707 hw_ddb_entry->start, hw_ddb_entry->end);
12708 }
12709 }
12710
12711 /*
12712 * cursor
12713 * If the cursor plane isn't active, we may not have updated its ddb
12714 * allocation. In that case, since the ddb allocation will be updated
12715 * once the plane becomes visible, we can skip this check.
12716 */
12717 if (1) {
12718 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12719
12720 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12721 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12722
12723 /* Watermarks */
12724 for (level = 0; level <= max_level; level++) {
12725 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12726 &sw_plane_wm->wm[level]))
12727 continue;
12728
12729 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12730 pipe_name(pipe), level,
12731 sw_plane_wm->wm[level].plane_en,
12732 sw_plane_wm->wm[level].plane_res_b,
12733 sw_plane_wm->wm[level].plane_res_l,
12734 hw_plane_wm->wm[level].plane_en,
12735 hw_plane_wm->wm[level].plane_res_b,
12736 hw_plane_wm->wm[level].plane_res_l);
12737 }
12738
12739 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12740 &sw_plane_wm->trans_wm)) {
12741 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12742 pipe_name(pipe),
12743 sw_plane_wm->trans_wm.plane_en,
12744 sw_plane_wm->trans_wm.plane_res_b,
12745 sw_plane_wm->trans_wm.plane_res_l,
12746 hw_plane_wm->trans_wm.plane_en,
12747 hw_plane_wm->trans_wm.plane_res_b,
12748 hw_plane_wm->trans_wm.plane_res_l);
12749 }
12750
12751 /* DDB */
12752 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
12753 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12754
12755 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12756 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12757 pipe_name(pipe),
12758 sw_ddb_entry->start, sw_ddb_entry->end,
12759 hw_ddb_entry->start, hw_ddb_entry->end);
12760 }
12761 }
12762
12763 kfree(hw);
12764 }
12765
12766 static void
12767 verify_connector_state(struct drm_device *dev,
12768 struct drm_atomic_state *state,
12769 struct drm_crtc *crtc)
12770 {
12771 struct drm_connector *connector;
12772 struct drm_connector_state *new_conn_state;
12773 int i;
12774
12775 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12776 struct drm_encoder *encoder = connector->encoder;
12777 struct drm_crtc_state *crtc_state = NULL;
12778
12779 if (new_conn_state->crtc != crtc)
12780 continue;
12781
12782 if (crtc)
12783 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12784
12785 intel_connector_verify_state(crtc_state, new_conn_state);
12786
12787 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12788 "connector's atomic encoder doesn't match legacy encoder\n");
12789 }
12790 }
12791
12792 static void
12793 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12794 {
12795 struct intel_encoder *encoder;
12796 struct drm_connector *connector;
12797 struct drm_connector_state *old_conn_state, *new_conn_state;
12798 int i;
12799
12800 for_each_intel_encoder(dev, encoder) {
12801 bool enabled = false, found = false;
12802 enum pipe pipe;
12803
12804 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12805 encoder->base.base.id,
12806 encoder->base.name);
12807
12808 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12809 new_conn_state, i) {
12810 if (old_conn_state->best_encoder == &encoder->base)
12811 found = true;
12812
12813 if (new_conn_state->best_encoder != &encoder->base)
12814 continue;
12815 found = enabled = true;
12816
12817 I915_STATE_WARN(new_conn_state->crtc !=
12818 encoder->base.crtc,
12819 "connector's crtc doesn't match encoder crtc\n");
12820 }
12821
12822 if (!found)
12823 continue;
12824
12825 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12826 "encoder's enabled state mismatch "
12827 "(expected %i, found %i)\n",
12828 !!encoder->base.crtc, enabled);
12829
12830 if (!encoder->base.crtc) {
12831 bool active;
12832
12833 active = encoder->get_hw_state(encoder, &pipe);
12834 I915_STATE_WARN(active,
12835 "encoder detached but still enabled on pipe %c.\n",
12836 pipe_name(pipe));
12837 }
12838 }
12839 }
12840
12841 static void
12842 verify_crtc_state(struct drm_crtc *crtc,
12843 struct drm_crtc_state *old_crtc_state,
12844 struct drm_crtc_state *new_crtc_state)
12845 {
12846 struct drm_device *dev = crtc->dev;
12847 struct drm_i915_private *dev_priv = to_i915(dev);
12848 struct intel_encoder *encoder;
12849 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12850 struct intel_crtc_state *pipe_config, *sw_config;
12851 struct drm_atomic_state *old_state;
12852 bool active;
12853
12854 old_state = old_crtc_state->state;
12855 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12856 pipe_config = to_intel_crtc_state(old_crtc_state);
12857 memset(pipe_config, 0, sizeof(*pipe_config));
12858 pipe_config->base.crtc = crtc;
12859 pipe_config->base.state = old_state;
12860
12861 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12862
12863 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12864
12865 /* we keep both pipes enabled on 830 */
12866 if (IS_I830(dev_priv))
12867 active = new_crtc_state->active;
12868
12869 I915_STATE_WARN(new_crtc_state->active != active,
12870 "crtc active state doesn't match with hw state "
12871 "(expected %i, found %i)\n", new_crtc_state->active, active);
12872
12873 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12874 "transitional active state does not match atomic hw state "
12875 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12876
12877 for_each_encoder_on_crtc(dev, crtc, encoder) {
12878 enum pipe pipe;
12879
12880 active = encoder->get_hw_state(encoder, &pipe);
12881 I915_STATE_WARN(active != new_crtc_state->active,
12882 "[ENCODER:%i] active %i with crtc active %i\n",
12883 encoder->base.base.id, active, new_crtc_state->active);
12884
12885 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12886 "Encoder connected to wrong pipe %c\n",
12887 pipe_name(pipe));
12888
12889 if (active)
12890 encoder->get_config(encoder, pipe_config);
12891 }
12892
12893 intel_crtc_compute_pixel_rate(pipe_config);
12894
12895 if (!new_crtc_state->active)
12896 return;
12897
12898 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12899
12900 sw_config = to_intel_crtc_state(new_crtc_state);
12901 if (!intel_pipe_config_compare(dev_priv, sw_config,
12902 pipe_config, false)) {
12903 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12904 intel_dump_pipe_config(intel_crtc, pipe_config,
12905 "[hw state]");
12906 intel_dump_pipe_config(intel_crtc, sw_config,
12907 "[sw state]");
12908 }
12909 }
12910
12911 static void
12912 intel_verify_planes(struct intel_atomic_state *state)
12913 {
12914 struct intel_plane *plane;
12915 const struct intel_plane_state *plane_state;
12916 int i;
12917
12918 for_each_new_intel_plane_in_state(state, plane,
12919 plane_state, i)
12920 assert_plane(plane, plane_state->slave ||
12921 plane_state->base.visible);
12922 }
12923
12924 static void
12925 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12926 struct intel_shared_dpll *pll,
12927 struct drm_crtc *crtc,
12928 struct drm_crtc_state *new_state)
12929 {
12930 struct intel_dpll_hw_state dpll_hw_state;
12931 unsigned int crtc_mask;
12932 bool active;
12933
12934 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12935
12936 DRM_DEBUG_KMS("%s\n", pll->info->name);
12937
12938 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12939
12940 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12941 I915_STATE_WARN(!pll->on && pll->active_mask,
12942 "pll in active use but not on in sw tracking\n");
12943 I915_STATE_WARN(pll->on && !pll->active_mask,
12944 "pll is on but not used by any active crtc\n");
12945 I915_STATE_WARN(pll->on != active,
12946 "pll on state mismatch (expected %i, found %i)\n",
12947 pll->on, active);
12948 }
12949
12950 if (!crtc) {
12951 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12952 "more active pll users than references: %x vs %x\n",
12953 pll->active_mask, pll->state.crtc_mask);
12954
12955 return;
12956 }
12957
12958 crtc_mask = drm_crtc_mask(crtc);
12959
12960 if (new_state->active)
12961 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12962 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12963 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12964 else
12965 I915_STATE_WARN(pll->active_mask & crtc_mask,
12966 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12967 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12968
12969 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12970 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12971 crtc_mask, pll->state.crtc_mask);
12972
12973 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12974 &dpll_hw_state,
12975 sizeof(dpll_hw_state)),
12976 "pll hw state mismatch\n");
12977 }
12978
12979 static void
12980 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12981 struct drm_crtc_state *old_crtc_state,
12982 struct drm_crtc_state *new_crtc_state)
12983 {
12984 struct drm_i915_private *dev_priv = to_i915(dev);
12985 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12986 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12987
12988 if (new_state->shared_dpll)
12989 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12990
12991 if (old_state->shared_dpll &&
12992 old_state->shared_dpll != new_state->shared_dpll) {
12993 unsigned int crtc_mask = drm_crtc_mask(crtc);
12994 struct intel_shared_dpll *pll = old_state->shared_dpll;
12995
12996 I915_STATE_WARN(pll->active_mask & crtc_mask,
12997 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12998 pipe_name(drm_crtc_index(crtc)));
12999 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13000 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13001 pipe_name(drm_crtc_index(crtc)));
13002 }
13003 }
13004
13005 static void
13006 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13007 struct drm_atomic_state *state,
13008 struct drm_crtc_state *old_state,
13009 struct drm_crtc_state *new_state)
13010 {
13011 if (!needs_modeset(new_state) &&
13012 !to_intel_crtc_state(new_state)->update_pipe)
13013 return;
13014
13015 verify_wm_state(crtc, new_state);
13016 verify_connector_state(crtc->dev, state, crtc);
13017 verify_crtc_state(crtc, old_state, new_state);
13018 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13019 }
13020
13021 static void
13022 verify_disabled_dpll_state(struct drm_device *dev)
13023 {
13024 struct drm_i915_private *dev_priv = to_i915(dev);
13025 int i;
13026
13027 for (i = 0; i < dev_priv->num_shared_dpll; i++)
13028 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13029 }
13030
13031 static void
13032 intel_modeset_verify_disabled(struct drm_device *dev,
13033 struct drm_atomic_state *state)
13034 {
13035 verify_encoder_state(dev, state);
13036 verify_connector_state(dev, state, NULL);
13037 verify_disabled_dpll_state(dev);
13038 }
13039
13040 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13041 {
13042 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13043 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13044
13045 /*
13046 * The scanline counter increments at the leading edge of hsync.
13047 *
13048 * On most platforms it starts counting from vtotal-1 on the
13049 * first active line. That means the scanline counter value is
13050 * always one less than what we would expect. I.e. just after
13051 * start of vblank, which also occurs at start of hsync (on the
13052 * last active line), the scanline counter will read vblank_start-1.
13053 *
13054 * On gen2 the scanline counter starts counting from 1 instead
13055 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13056 * to keep the value positive), instead of adding one.
13057 *
13058 * On HSW+ the behaviour of the scanline counter depends on the output
13059 * type. For DP ports it behaves like most other platforms, but on HDMI
13060 * there's an extra 1 line difference. So we need to add two instead of
13061 * one to the value.
13062 *
13063 * On VLV/CHV DSI the scanline counter would appear to increment
13064 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13065 * that means we can't tell whether we're in vblank or not while
13066 * we're on that particular line. We must still set scanline_offset
13067 * to 1 so that the vblank timestamps come out correct when we query
13068 * the scanline counter from within the vblank interrupt handler.
13069 * However if queried just before the start of vblank we'll get an
13070 * answer that's slightly in the future.
13071 */
13072 if (IS_GEN(dev_priv, 2)) {
13073 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13074 int vtotal;
13075
13076 vtotal = adjusted_mode->crtc_vtotal;
13077 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13078 vtotal /= 2;
13079
13080 crtc->scanline_offset = vtotal - 1;
13081 } else if (HAS_DDI(dev_priv) &&
13082 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13083 crtc->scanline_offset = 2;
13084 } else
13085 crtc->scanline_offset = 1;
13086 }
13087
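/*
 * Sketch of how the offset is consumed (cf. __intel_get_crtc_scanline()):
 * the raw hw counter is corrected with
 *
 *	scanline = (hw_position + crtc->scanline_offset) % vtotal;
 *
 * so e.g. on gen2, where the offset is vtotal - 1, a hw reading of 1 on
 * the first active line maps back to scanline 0.
 */
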
13088 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13089 {
13090 struct drm_device *dev = state->dev;
13091 struct drm_i915_private *dev_priv = to_i915(dev);
13092 struct drm_crtc *crtc;
13093 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13094 int i;
13095
13096 if (!dev_priv->display.crtc_compute_clock)
13097 return;
13098
13099 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13101 struct intel_shared_dpll *old_dpll =
13102 to_intel_crtc_state(old_crtc_state)->shared_dpll;
13103
13104 if (!needs_modeset(new_crtc_state))
13105 continue;
13106
13107 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
13108
13109 if (!old_dpll)
13110 continue;
13111
13112 intel_release_shared_dpll(old_dpll, intel_crtc, state);
13113 }
13114 }
13115
13116 /*
13117 * This implements the workaround described in the "notes" section of the mode
13118 * set sequence documentation. When going from no pipes or a single pipe to
13119 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13120 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13121 */
13122 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13123 {
13124 struct drm_crtc_state *crtc_state;
13125 struct intel_crtc *intel_crtc;
13126 struct drm_crtc *crtc;
13127 struct intel_crtc_state *first_crtc_state = NULL;
13128 struct intel_crtc_state *other_crtc_state = NULL;
13129 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13130 int i;
13131
13132 /* look at all crtcs that are going to be enabled during the modeset */
13133 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
13134 intel_crtc = to_intel_crtc(crtc);
13135
13136 if (!crtc_state->active || !needs_modeset(crtc_state))
13137 continue;
13138
13139 if (first_crtc_state) {
13140 other_crtc_state = to_intel_crtc_state(crtc_state);
13141 break;
13142 } else {
13143 first_crtc_state = to_intel_crtc_state(crtc_state);
13144 first_pipe = intel_crtc->pipe;
13145 }
13146 }
13147
13148 /* No workaround needed? */
13149 if (!first_crtc_state)
13150 return 0;
13151
13152 /* w/a possibly needed, check how many crtcs are already enabled. */
13153 for_each_intel_crtc(state->dev, intel_crtc) {
13154 struct intel_crtc_state *pipe_config;
13155
13156 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13157 if (IS_ERR(pipe_config))
13158 return PTR_ERR(pipe_config);
13159
13160 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13161
13162 if (!pipe_config->base.active ||
13163 needs_modeset(&pipe_config->base))
13164 continue;
13165
13166 /* 2 or more enabled crtcs means no need for w/a */
13167 if (enabled_pipe != INVALID_PIPE)
13168 return 0;
13169
13170 enabled_pipe = intel_crtc->pipe;
13171 }
13172
13173 if (enabled_pipe != INVALID_PIPE)
13174 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13175 else if (other_crtc_state)
13176 other_crtc_state->hsw_workaround_pipe = first_pipe;
13177
13178 return 0;
13179 }
13180
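/*
 * Illustration of the marking above: enabling pipe B while pipe A is
 * already running sets B's hsw_workaround_pipe to A, so B's plane
 * enable waits for two vblanks on A. Enabling A and B together from a
 * blank screen instead makes the second crtc found (B) wait on the
 * first (A).
 */
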
13181 static int intel_lock_all_pipes(struct drm_atomic_state *state)
13182 {
13183 struct drm_crtc *crtc;
13184
13185 /* Add all pipes to the state */
13186 for_each_crtc(state->dev, crtc) {
13187 struct drm_crtc_state *crtc_state;
13188
13189 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13190 if (IS_ERR(crtc_state))
13191 return PTR_ERR(crtc_state);
13192 }
13193
13194 return 0;
13195 }
13196
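/*
 * Note: drm_atomic_get_crtc_state() acquires the crtc's modeset lock as
 * a side effect, so merely adding every pipe to the state is what makes
 * this a "lock all pipes" operation even though nothing is modified.
 */
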
13197 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13198 {
13199 struct drm_crtc *crtc;
13200
13201 /*
13202 * Add all pipes to the state, and force
13203 * a modeset on all the active ones.
13204 */
13205 for_each_crtc(state->dev, crtc) {
13206 struct drm_crtc_state *crtc_state;
13207 int ret;
13208
13209 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13210 if (IS_ERR(crtc_state))
13211 return PTR_ERR(crtc_state);
13212
13213 if (!crtc_state->active || needs_modeset(crtc_state))
13214 continue;
13215
13216 crtc_state->mode_changed = true;
13217
13218 ret = drm_atomic_add_affected_connectors(state, crtc);
13219 if (ret)
13220 return ret;
13221
13222 ret = drm_atomic_add_affected_planes(state, crtc);
13223 if (ret)
13224 return ret;
13225 }
13226
13227 return 0;
13228 }
13229
13230 static int intel_modeset_checks(struct drm_atomic_state *state)
13231 {
13232 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13233 struct drm_i915_private *dev_priv = to_i915(state->dev);
13234 struct drm_crtc *crtc;
13235 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13236 int ret = 0, i;
13237
13238 if (!check_digital_port_conflicts(state)) {
13239 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13240 return -EINVAL;
13241 }
13242
13243 /* keep the current setting */
13244 if (!intel_state->cdclk.force_min_cdclk_changed)
13245 intel_state->cdclk.force_min_cdclk =
13246 dev_priv->cdclk.force_min_cdclk;
13247
13248 intel_state->modeset = true;
13249 intel_state->active_crtcs = dev_priv->active_crtcs;
13250 intel_state->cdclk.logical = dev_priv->cdclk.logical;
13251 intel_state->cdclk.actual = dev_priv->cdclk.actual;
13252 intel_state->cdclk.pipe = INVALID_PIPE;
13253
13254 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13255 if (new_crtc_state->active)
13256 intel_state->active_crtcs |= 1 << i;
13257 else
13258 intel_state->active_crtcs &= ~(1 << i);
13259
13260 if (old_crtc_state->active != new_crtc_state->active)
13261 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13262 }
13263
13264 /*
13265 * See if the config requires any additional preparation, e.g.
13266 * to adjust global state with pipes off. We need to do this
13267 * here so we can get the modeset_pipe updated config for the new
13268 * mode set on this crtc. For other crtcs we need to use the
13269 * adjusted_mode bits in the crtc directly.
13270 */
13271 if (dev_priv->display.modeset_calc_cdclk) {
13272 enum pipe pipe;
13273
13274 ret = dev_priv->display.modeset_calc_cdclk(state);
13275 if (ret < 0)
13276 return ret;
13277
13278 /*
13279 * Writes to dev_priv->cdclk.logical must be protected by
13280 * holding all the crtc locks, even if we don't end up
13281 * touching the hardware
13282 */
13283 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13284 &intel_state->cdclk.logical)) {
13285 ret = intel_lock_all_pipes(state);
13286 if (ret < 0)
13287 return ret;
13288 }
13289
13290 if (is_power_of_2(intel_state->active_crtcs)) {
13291 struct drm_crtc *crtc;
13292 struct drm_crtc_state *crtc_state;
13293
13294 pipe = ilog2(intel_state->active_crtcs);
13295 crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
13296 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
13297 if (crtc_state && needs_modeset(crtc_state))
13298 pipe = INVALID_PIPE;
13299 } else {
13300 pipe = INVALID_PIPE;
13301 }
13302
13303 /* All pipes must be switched off while we change the cdclk. */
13304 if (pipe != INVALID_PIPE &&
13305 intel_cdclk_needs_cd2x_update(dev_priv,
13306 &dev_priv->cdclk.actual,
13307 &intel_state->cdclk.actual)) {
13308 ret = intel_lock_all_pipes(state);
13309 if (ret < 0)
13310 return ret;
13311
13312 intel_state->cdclk.pipe = pipe;
13313 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13314 &intel_state->cdclk.actual)) {
13315 ret = intel_modeset_all_pipes(state);
13316 if (ret < 0)
13317 return ret;
13318
13319 intel_state->cdclk.pipe = INVALID_PIPE;
13320 }
13321
13322 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13323 intel_state->cdclk.logical.cdclk,
13324 intel_state->cdclk.actual.cdclk);
13325 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13326 intel_state->cdclk.logical.voltage_level,
13327 intel_state->cdclk.actual.voltage_level);
13328 }
13329
13330 intel_modeset_clear_plls(state);
13331
13332 if (IS_HASWELL(dev_priv))
13333 return haswell_mode_set_planes_workaround(state);
13334
13335 return 0;
13336 }
13337
13338 /*
13339 * Handle calculation of various watermark data at the end of the atomic check
13340 * phase. The code here should be run after the per-crtc and per-plane 'check'
13341 * handlers to ensure that all derived state has been updated.
13342 */
13343 static int calc_watermark_data(struct intel_atomic_state *state)
13344 {
13345 struct drm_device *dev = state->base.dev;
13346 struct drm_i915_private *dev_priv = to_i915(dev);
13347
13348 /* Is there platform-specific watermark information to calculate? */
13349 if (dev_priv->display.compute_global_watermarks)
13350 return dev_priv->display.compute_global_watermarks(state);
13351
13352 return 0;
13353 }
13354
13355 /**
13356 * intel_atomic_check - validate state object
13357 * @dev: drm device
13358 * @state: state to validate
13359 */
13360 static int intel_atomic_check(struct drm_device *dev,
13361 struct drm_atomic_state *state)
13362 {
13363 struct drm_i915_private *dev_priv = to_i915(dev);
13364 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13365 struct drm_crtc *crtc;
13366 struct drm_crtc_state *old_crtc_state, *crtc_state;
13367 int ret, i;
13368 bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
13369
13370 /* Catch I915_MODE_FLAG_INHERITED */
13371 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
13372 crtc_state, i) {
13373 if (crtc_state->mode.private_flags !=
13374 old_crtc_state->mode.private_flags)
13375 crtc_state->mode_changed = true;
13376 }
13377
13378 ret = drm_atomic_helper_check_modeset(dev, state);
13379 if (ret)
13380 return ret;
13381
13382 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
13383 struct intel_crtc_state *pipe_config =
13384 to_intel_crtc_state(crtc_state);
13385
13386 if (!needs_modeset(crtc_state))
13387 continue;
13388
13389 if (!crtc_state->enable) {
13390 any_ms = true;
13391 continue;
13392 }
13393
13394 ret = intel_modeset_pipe_config(crtc, pipe_config);
13395 if (ret == -EDEADLK)
13396 return ret;
13397 if (ret) {
13398 intel_dump_pipe_config(to_intel_crtc(crtc),
13399 pipe_config, "[failed]");
13400 return ret;
13401 }
13402
13403 if (intel_pipe_config_compare(dev_priv,
13404 to_intel_crtc_state(old_crtc_state),
13405 pipe_config, true)) {
13406 crtc_state->mode_changed = false;
13407 pipe_config->update_pipe = true;
13408 }
13409
13410 if (needs_modeset(crtc_state))
13411 any_ms = true;
13412
13413 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13414 needs_modeset(crtc_state) ?
13415 "[modeset]" : "[fastset]");
13416 }
13417
13418 ret = drm_dp_mst_atomic_check(state);
13419 if (ret)
13420 return ret;
13421
13422 if (any_ms) {
13423 ret = intel_modeset_checks(state);
13424
13425 if (ret)
13426 return ret;
13427 } else {
13428 intel_state->cdclk.logical = dev_priv->cdclk.logical;
13429 }
13430
13431 ret = icl_add_linked_planes(intel_state);
13432 if (ret)
13433 return ret;
13434
13435 ret = drm_atomic_helper_check_planes(dev, state);
13436 if (ret)
13437 return ret;
13438
13439 intel_fbc_choose_crtc(dev_priv, intel_state);
13440 ret = calc_watermark_data(intel_state);
13441 if (ret)
13442 return ret;
13443
13444 ret = intel_bw_atomic_check(intel_state);
13445 if (ret)
13446 return ret;
13447
13448 return 0;
13449 }
13450
13451 static int intel_atomic_prepare_commit(struct drm_device *dev,
13452 struct drm_atomic_state *state)
13453 {
13454 return drm_atomic_helper_prepare_planes(dev, state);
13455 }
13456
13457 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13458 {
13459 struct drm_device *dev = crtc->base.dev;
13460 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13461
13462 if (!vblank->max_vblank_count)
13463 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13464
13465 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13466 }
13467
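/*
 * Note: a zero max_vblank_count tells the drm core there is no usable
 * hw frame counter, so the count is derived from vblank timestamps via
 * drm_crtc_accurate_vblank_count() instead.
 */
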
13468 static void intel_update_crtc(struct drm_crtc *crtc,
13469 struct drm_atomic_state *state,
13470 struct drm_crtc_state *old_crtc_state,
13471 struct drm_crtc_state *new_crtc_state)
13472 {
13473 struct drm_device *dev = crtc->dev;
13474 struct drm_i915_private *dev_priv = to_i915(dev);
13475 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13476 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13477 bool modeset = needs_modeset(new_crtc_state);
13478 struct intel_plane_state *new_plane_state =
13479 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13480 to_intel_plane(crtc->primary));
13481
13482 if (modeset) {
13483 update_scanline_offset(pipe_config);
13484 dev_priv->display.crtc_enable(pipe_config, state);
13485
13486 /* vblanks work again, re-enable pipe CRC. */
13487 intel_crtc_enable_pipe_crc(intel_crtc);
13488 } else {
13489 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13490 pipe_config);
13491
13492 if (pipe_config->update_pipe)
13493 intel_encoders_update_pipe(crtc, pipe_config, state);
13494 }
13495
13496 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13497 intel_fbc_disable(intel_crtc);
13498 else if (new_plane_state)
13499 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
13500
13501 intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13502
13503 if (INTEL_GEN(dev_priv) >= 9)
13504 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13505 else
13506 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13507
13508 intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13509 }
13510
13511 static void intel_update_crtcs(struct drm_atomic_state *state)
13512 {
13513 struct drm_crtc *crtc;
13514 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13515 int i;
13516
13517 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13518 if (!new_crtc_state->active)
13519 continue;
13520
13521 intel_update_crtc(crtc, state, old_crtc_state,
13522 new_crtc_state);
13523 }
13524 }
13525
13526 static void skl_update_crtcs(struct drm_atomic_state *state)
13527 {
13528 struct drm_i915_private *dev_priv = to_i915(state->dev);
13529 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13530 struct drm_crtc *crtc;
13531 struct intel_crtc *intel_crtc;
13532 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13533 struct intel_crtc_state *cstate;
13534 unsigned int updated = 0;
13535 bool progress;
13536 enum pipe pipe;
13537 int i;
13538 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13539 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13540 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13541
13542 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13543 /* ignore allocations for crtcs that have been turned off. */
13544 if (new_crtc_state->active)
13545 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13546
13547 /* If a 2nd DBuf slice is required, enable it here */
13548 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13549 icl_dbuf_slices_update(dev_priv, required_slices);
13550
13551 /*
13552 * Whenever the number of active pipes changes, we need to make sure we
13553 * update the pipes in the right order so that their ddb allocations
13554 * never overlap with each other in between CRTC updates. Otherwise we'll
13555 * cause pipe underruns and other bad stuff.
13556 */
13557 do {
13558 progress = false;
13559
13560 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13561 bool vbl_wait = false;
13562 unsigned int cmask = drm_crtc_mask(crtc);
13563
13564 intel_crtc = to_intel_crtc(crtc);
13565 cstate = to_intel_crtc_state(new_crtc_state);
13566 pipe = intel_crtc->pipe;
13567
13568 if (updated & cmask || !cstate->base.active)
13569 continue;
13570
13571 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13572 entries,
13573 INTEL_INFO(dev_priv)->num_pipes, i))
13574 continue;
13575
13576 updated |= cmask;
13577 entries[i] = cstate->wm.skl.ddb;
13578
13579 /*
13580 * If this is an already active pipe, its DDB changed,
13581 * and this isn't the last pipe that needs updating
13582 * then we need to wait for a vblank to pass for the
13583 * new ddb allocation to take effect.
13584 */
13585 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13586 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13587 !new_crtc_state->active_changed &&
13588 intel_state->wm_results.dirty_pipes != updated)
13589 vbl_wait = true;
13590
13591 intel_update_crtc(crtc, state, old_crtc_state,
13592 new_crtc_state);
13593
13594 if (vbl_wait)
13595 intel_wait_for_vblank(dev_priv, pipe);
13596
13597 progress = true;
13598 }
13599 } while (progress);
13600
13601 /* If the 2nd DBuf slice is no longer required, disable it */
13602 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13603 icl_dbuf_slices_update(dev_priv, required_slices);
13604 }
13605
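/*
 * Worked example of the ordering constraint above (numbers invented):
 * going from A:[0,512) B:[512,1024) to A:[0,256) B:[256,1024), pipe A
 * must shrink first and a vblank must pass before B may grow, otherwise
 * both pipes would transiently claim [256,512).
 */
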
13606 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13607 {
13608 struct intel_atomic_state *state, *next;
13609 struct llist_node *freed;
13610
13611 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13612 llist_for_each_entry_safe(state, next, freed, freed)
13613 drm_atomic_state_put(&state->base);
13614 }
13615
13616 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13617 {
13618 struct drm_i915_private *dev_priv =
13619 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13620
13621 intel_atomic_helper_free_state(dev_priv);
13622 }
13623
13624 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13625 {
13626 struct wait_queue_entry wait_fence, wait_reset;
13627 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13628
13629 init_wait_entry(&wait_fence, 0);
13630 init_wait_entry(&wait_reset, 0);
13631 for (;;) {
13632 prepare_to_wait(&intel_state->commit_ready.wait,
13633 &wait_fence, TASK_UNINTERRUPTIBLE);
13634 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13635 &wait_reset, TASK_UNINTERRUPTIBLE);
13636
13637
13638 if (i915_sw_fence_done(&intel_state->commit_ready)
13639 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13640 break;
13641
13642 schedule();
13643 }
13644 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13645 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13646 }
13647
13648 static void intel_atomic_cleanup_work(struct work_struct *work)
13649 {
13650 struct drm_atomic_state *state =
13651 container_of(work, struct drm_atomic_state, commit_work);
13652 struct drm_i915_private *i915 = to_i915(state->dev);
13653
13654 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13655 drm_atomic_helper_commit_cleanup_done(state);
13656 drm_atomic_state_put(state);
13657
13658 intel_atomic_helper_free_state(i915);
13659 }
13660
13661 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13662 {
13663 struct drm_device *dev = state->dev;
13664 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13665 struct drm_i915_private *dev_priv = to_i915(dev);
13666 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13667 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13668 struct drm_crtc *crtc;
13669 struct intel_crtc *intel_crtc;
13670 u64 put_domains[I915_MAX_PIPES] = {};
13671 intel_wakeref_t wakeref = 0;
13672 int i;
13673
13674 intel_atomic_commit_fence_wait(intel_state);
13675
13676 drm_atomic_helper_wait_for_dependencies(state);
13677
13678 if (intel_state->modeset)
13679 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13680
13681 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13682 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13683 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13684 intel_crtc = to_intel_crtc(crtc);
13685
13686 if (needs_modeset(new_crtc_state) ||
13687 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13688
13689 put_domains[intel_crtc->pipe] =
13690 modeset_get_crtc_power_domains(crtc,
13691 new_intel_crtc_state);
13692 }
13693
13694 if (!needs_modeset(new_crtc_state))
13695 continue;
13696
13697 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13698
13699 if (old_crtc_state->active) {
13700 intel_crtc_disable_planes(intel_state, intel_crtc);
13701
13702 /*
13703 * We need to disable pipe CRC before disabling the pipe,
13704 * or we race against vblank off.
13705 */
13706 intel_crtc_disable_pipe_crc(intel_crtc);
13707
13708 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13709 intel_crtc->active = false;
13710 intel_fbc_disable(intel_crtc);
13711 intel_disable_shared_dpll(old_intel_crtc_state);
13712
13713 /*
13714 * Underruns don't always raise
13715 * interrupts, so check manually.
13716 */
13717 intel_check_cpu_fifo_underruns(dev_priv);
13718 intel_check_pch_fifo_underruns(dev_priv);
13719
13720 /* FIXME unify this for all platforms */
13721 if (!new_crtc_state->active &&
13722 !HAS_GMCH(dev_priv) &&
13723 dev_priv->display.initial_watermarks)
13724 dev_priv->display.initial_watermarks(intel_state,
13725 new_intel_crtc_state);
13726 }
13727 }
13728
13729 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13730 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13731 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13732
13733 if (intel_state->modeset) {
13734 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13735
13736 intel_set_cdclk_pre_plane_update(dev_priv,
13737 &intel_state->cdclk.actual,
13738 &dev_priv->cdclk.actual,
13739 intel_state->cdclk.pipe);
13740
13741 /*
13742 * SKL workaround: bspec recommends we disable the SAGV when we
13743 * have more than one pipe enabled
13744 */
13745 if (!intel_can_enable_sagv(state))
13746 intel_disable_sagv(dev_priv);
13747
13748 intel_modeset_verify_disabled(dev, state);
13749 }
13750
13751 /* Complete the events for pipes that have now been disabled */
13752 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13753 bool modeset = needs_modeset(new_crtc_state);
13754
13755 /* Complete events for the now disabled pipes here. */
13756 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13757 spin_lock_irq(&dev->event_lock);
13758 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13759 spin_unlock_irq(&dev->event_lock);
13760
13761 new_crtc_state->event = NULL;
13762 }
13763 }
13764
13765 /* Now enable the clocks, planes, pipes, and connectors that we set up. */
13766 dev_priv->display.update_crtcs(state);
13767
13768 if (intel_state->modeset)
13769 intel_set_cdclk_post_plane_update(dev_priv,
13770 &intel_state->cdclk.actual,
13771 &dev_priv->cdclk.actual,
13772 intel_state->cdclk.pipe);
13773
13774 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13775 * already, but still need the state for the delayed optimization. To
13776 * fix this:
13777 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13778 * - schedule that vblank worker _before_ calling hw_done
13779 * - at the start of commit_tail, cancel it _synchronously_
13780 * - switch over to the vblank wait helper in the core after that since
13781 * we don't need our special handling any more.
13782 */
13783 drm_atomic_helper_wait_for_flip_done(dev, state);
13784
13785 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13786 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13787
13788 if (new_crtc_state->active &&
13789 !needs_modeset(new_crtc_state) &&
13790 (new_intel_crtc_state->base.color_mgmt_changed ||
13791 new_intel_crtc_state->update_pipe))
13792 intel_color_load_luts(new_intel_crtc_state);
13793 }
13794
13795 /*
13796 * Now that the vblank has passed, we can go ahead and program the
13797 * optimal watermarks on platforms that need two-step watermark
13798 * programming.
13799 *
13800 * TODO: Move this (and other cleanup) to an async worker eventually.
13801 */
13802 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13803 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13804
13805 if (dev_priv->display.optimize_watermarks)
13806 dev_priv->display.optimize_watermarks(intel_state,
13807 new_intel_crtc_state);
13808 }
13809
13810 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13811 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13812
13813 if (put_domains[i])
13814 modeset_put_power_domains(dev_priv, put_domains[i]);
13815
13816 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13817 }
13818
13819 if (intel_state->modeset)
13820 intel_verify_planes(intel_state);
13821
13822 if (intel_state->modeset && intel_can_enable_sagv(state))
13823 intel_enable_sagv(dev_priv);
13824
13825 drm_atomic_helper_commit_hw_done(state);
13826
13827 if (intel_state->modeset) {
13828 /* As one of the primary mmio accessors, KMS has a high
13829 * likelihood of triggering bugs in unclaimed access. After we
13830 * finish modesetting, see if an error has been flagged, and if
13831 * so enable debugging for the next modeset - and hope we catch
13832 * the culprit.
13833 */
13834 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13835 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13836 }
13837
13838 /*
13839 * Defer the cleanup of the old state to a separate worker to not
13840 * impede the current task (userspace for blocking modesets) that
13841 * is executed inline. For out-of-line asynchronous modesets/flips,
13842 * deferring to a new worker seems overkill, but we would place a
13843 * schedule point (cond_resched()) here anyway to keep latencies
13844 * down.
13845 */
13846 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13847 queue_work(system_highpri_wq, &state->commit_work);
13848 }
13849
13850 static void intel_atomic_commit_work(struct work_struct *work)
13851 {
13852 struct drm_atomic_state *state =
13853 container_of(work, struct drm_atomic_state, commit_work);
13854
13855 intel_atomic_commit_tail(state);
13856 }
13857
13858 static int __i915_sw_fence_call
13859 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13860 enum i915_sw_fence_notify notify)
13861 {
13862 struct intel_atomic_state *state =
13863 container_of(fence, struct intel_atomic_state, commit_ready);
13864
13865 switch (notify) {
13866 case FENCE_COMPLETE:
13867 /* we do blocking waits in the worker, nothing to do here */
13868 break;
13869 case FENCE_FREE:
13870 {
13871 struct intel_atomic_helper *helper =
13872 &to_i915(state->base.dev)->atomic_helper;
13873
13874 if (llist_add(&state->freed, &helper->free_list))
13875 schedule_work(&helper->free_work);
13876 break;
13877 }
13878 }
13879
13880 return NOTIFY_DONE;
13881 }
13882
13883 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13884 {
13885 struct drm_plane_state *old_plane_state, *new_plane_state;
13886 struct drm_plane *plane;
13887 int i;
13888
13889 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13890 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13891 intel_fb_obj(new_plane_state->fb),
13892 to_intel_plane(plane)->frontbuffer_bit);
13893 }
13894
13895 /**
13896 * intel_atomic_commit - commit validated state object
13897 * @dev: DRM device
13898 * @state: the top-level driver state object
13899 * @nonblock: nonblocking commit
13900 *
13901 * This function commits a top-level state object that has been validated
13902 * with drm_atomic_helper_check().
13903 *
13904 * RETURNS
13905 * Zero for success or -errno.
13906 */
13907 static int intel_atomic_commit(struct drm_device *dev,
13908 struct drm_atomic_state *state,
13909 bool nonblock)
13910 {
13911 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13912 struct drm_i915_private *dev_priv = to_i915(dev);
13913 int ret = 0;
13914
13915 drm_atomic_state_get(state);
13916 i915_sw_fence_init(&intel_state->commit_ready,
13917 intel_atomic_commit_ready);
13918
13919 /*
13920 * The intel_legacy_cursor_update() fast path takes care
13921 * of avoiding the vblank waits for simple cursor
13922 * movement and flips. For cursor on/off and size changes,
13923 * we want to perform the vblank waits so that watermark
13924 * updates happen during the correct frames. Gen9+ have
13925 * double buffered watermarks and so shouldn't need this.
13926 *
13927 * Unset state->legacy_cursor_update before the call to
13928 * drm_atomic_helper_setup_commit() because otherwise
13929 * drm_atomic_helper_wait_for_flip_done() is a noop and
13930 * we get FIFO underruns because we didn't wait
13931 * for vblank.
13932 *
13933 * FIXME doing watermarks and fb cleanup from a vblank worker
13934 * (assuming we had any) would solve these problems.
13935 */
13936 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13937 struct intel_crtc_state *new_crtc_state;
13938 struct intel_crtc *crtc;
13939 int i;
13940
13941 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13942 if (new_crtc_state->wm.need_postvbl_update ||
13943 new_crtc_state->update_wm_post)
13944 state->legacy_cursor_update = false;
13945 }
13946
13947 ret = intel_atomic_prepare_commit(dev, state);
13948 if (ret) {
13949 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13950 i915_sw_fence_commit(&intel_state->commit_ready);
13951 return ret;
13952 }
13953
13954 ret = drm_atomic_helper_setup_commit(state, nonblock);
13955 if (!ret)
13956 ret = drm_atomic_helper_swap_state(state, true);
13957
13958 if (ret) {
13959 i915_sw_fence_commit(&intel_state->commit_ready);
13960
13961 drm_atomic_helper_cleanup_planes(dev, state);
13962 return ret;
13963 }
13964 dev_priv->wm.distrust_bios_wm = false;
13965 intel_shared_dpll_swap_state(state);
13966 intel_atomic_track_fbs(state);
13967
13968 if (intel_state->modeset) {
13969 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13970 sizeof(intel_state->min_cdclk));
13971 memcpy(dev_priv->min_voltage_level,
13972 intel_state->min_voltage_level,
13973 sizeof(intel_state->min_voltage_level));
13974 dev_priv->active_crtcs = intel_state->active_crtcs;
13975 dev_priv->cdclk.force_min_cdclk =
13976 intel_state->cdclk.force_min_cdclk;
13977
13978 intel_cdclk_swap_state(intel_state);
13979 }
13980
13981 drm_atomic_state_get(state);
13982 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13983
13984 i915_sw_fence_commit(&intel_state->commit_ready);
13985 if (nonblock && intel_state->modeset) {
13986 queue_work(dev_priv->modeset_wq, &state->commit_work);
13987 } else if (nonblock) {
13988 queue_work(system_unbound_wq, &state->commit_work);
13989 } else {
13990 if (intel_state->modeset)
13991 flush_workqueue(dev_priv->modeset_wq);
13992 intel_atomic_commit_tail(state);
13993 }
13994
13995 return 0;
13996 }
13997
13998 static const struct drm_crtc_funcs intel_crtc_funcs = {
13999 .gamma_set = drm_atomic_helper_legacy_gamma_set,
14000 .set_config = drm_atomic_helper_set_config,
14001 .destroy = intel_crtc_destroy,
14002 .page_flip = drm_atomic_helper_page_flip,
14003 .atomic_duplicate_state = intel_crtc_duplicate_state,
14004 .atomic_destroy_state = intel_crtc_destroy_state,
14005 .set_crc_source = intel_crtc_set_crc_source,
14006 .verify_crc_source = intel_crtc_verify_crc_source,
14007 .get_crc_sources = intel_crtc_get_crc_sources,
14008 };
14009
14010 struct wait_rps_boost {
14011 struct wait_queue_entry wait;
14012
14013 struct drm_crtc *crtc;
14014 struct i915_request *request;
14015 };
14016
14017 static int do_rps_boost(struct wait_queue_entry *_wait,
14018 unsigned mode, int sync, void *key)
14019 {
14020 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14021 struct i915_request *rq = wait->request;
14022
14023 /*
14024 * If we missed the vblank, but the request is already running it
14025 * is reasonable to assume that it will complete before the next
14026 * vblank without our intervention, so leave RPS alone.
14027 */
14028 if (!i915_request_started(rq))
14029 gen6_rps_boost(rq);
14030 i915_request_put(rq);
14031
14032 drm_crtc_vblank_put(wait->crtc);
14033
14034 list_del(&wait->wait.entry);
14035 kfree(wait);
14036 return 1;
14037 }
14038
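/*
 * do_rps_boost() is a wait_queue_entry callback: it runs from the
 * vblank waitqueue wakeup (atomic context), which is why it must not
 * sleep and removes and frees itself before returning.
 */
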
14039 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14040 struct dma_fence *fence)
14041 {
14042 struct wait_rps_boost *wait;
14043
14044 if (!dma_fence_is_i915(fence))
14045 return;
14046
14047 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14048 return;
14049
14050 if (drm_crtc_vblank_get(crtc))
14051 return;
14052
14053 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14054 if (!wait) {
14055 drm_crtc_vblank_put(crtc);
14056 return;
14057 }
14058
14059 wait->request = to_request(dma_fence_get(fence));
14060 wait->crtc = crtc;
14061
14062 wait->wait.func = do_rps_boost;
14063 wait->wait.flags = 0;
14064
14065 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14066 }
14067
14068 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14069 {
14070 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14071 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14072 struct drm_framebuffer *fb = plane_state->base.fb;
14073 struct i915_vma *vma;
14074
14075 if (plane->id == PLANE_CURSOR &&
14076 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14077 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14078 const int align = intel_cursor_alignment(dev_priv);
14079 int err;
14080
14081 err = i915_gem_object_attach_phys(obj, align);
14082 if (err)
14083 return err;
14084 }
14085
14086 vma = intel_pin_and_fence_fb_obj(fb,
14087 &plane_state->view,
14088 intel_plane_uses_fence(plane_state),
14089 &plane_state->flags);
14090 if (IS_ERR(vma))
14091 return PTR_ERR(vma);
14092
14093 plane_state->vma = vma;
14094
14095 return 0;
14096 }
14097
14098 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14099 {
14100 struct i915_vma *vma;
14101
14102 vma = fetch_and_zero(&old_plane_state->vma);
14103 if (vma)
14104 intel_unpin_fb_vma(vma, old_plane_state->flags);
14105 }
14106
14107 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14108 {
14109 struct i915_sched_attr attr = {
14110 .priority = I915_PRIORITY_DISPLAY,
14111 };
14112
14113 i915_gem_object_wait_priority(obj, 0, &attr);
14114 }
14115
14116 /**
14117 * intel_prepare_plane_fb - Prepare fb for usage on plane
14118 * @plane: drm plane to prepare for
14119 * @new_state: the plane state being prepared
14120 *
14121 * Prepares a framebuffer for usage on a display plane. Generally this
14122 * involves pinning the underlying object and updating the frontbuffer tracking
14123 * bits. Some older platforms need special physical address handling for
14124 * cursor planes.
14125 *
14126 * Must be called with struct_mutex held.
14127 *
14128 * Returns 0 on success, negative error code on failure.
14129 */
14130 int
14131 intel_prepare_plane_fb(struct drm_plane *plane,
14132 struct drm_plane_state *new_state)
14133 {
14134 struct intel_atomic_state *intel_state =
14135 to_intel_atomic_state(new_state->state);
14136 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14137 struct drm_framebuffer *fb = new_state->fb;
14138 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14139 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14140 int ret;
14141
14142 if (old_obj) {
14143 struct drm_crtc_state *crtc_state =
14144 drm_atomic_get_new_crtc_state(new_state->state,
14145 plane->state->crtc);
14146
14147 /* Big Hammer: we also need to ensure that any pending
14148 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14149 * current scanout is retired before unpinning the old
14150 * framebuffer. Note that we rely on userspace rendering
14151 * into the buffer attached to the pipe it is waiting
14152 * on. If not, userspace generates a GPU hang with IPEHR
14153 * pointing to the MI_WAIT_FOR_EVENT.
14154 *
14155 * This should only fail upon a hung GPU, in which case we
14156 * can safely continue.
14157 */
14158 if (needs_modeset(crtc_state)) {
14159 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14160 old_obj->resv, NULL,
14161 false, 0,
14162 GFP_KERNEL);
14163 if (ret < 0)
14164 return ret;
14165 }
14166 }
14167
14168 if (new_state->fence) { /* explicit fencing */
14169 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14170 new_state->fence,
14171 I915_FENCE_TIMEOUT,
14172 GFP_KERNEL);
14173 if (ret < 0)
14174 return ret;
14175 }
14176
14177 if (!obj)
14178 return 0;
14179
14180 ret = i915_gem_object_pin_pages(obj);
14181 if (ret)
14182 return ret;
14183
14184 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14185 if (ret) {
14186 i915_gem_object_unpin_pages(obj);
14187 return ret;
14188 }
14189
14190 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14191
14192 mutex_unlock(&dev_priv->drm.struct_mutex);
14193 i915_gem_object_unpin_pages(obj);
14194 if (ret)
14195 return ret;
14196
14197 fb_obj_bump_render_priority(obj);
14198 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14199
14200 if (!new_state->fence) { /* implicit fencing */
14201 struct dma_fence *fence;
14202
14203 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14204 obj->resv, NULL,
14205 false, I915_FENCE_TIMEOUT,
14206 GFP_KERNEL);
14207 if (ret < 0)
14208 return ret;
14209
14210 fence = reservation_object_get_excl_rcu(obj->resv);
14211 if (fence) {
14212 add_rps_boost_after_vblank(new_state->crtc, fence);
14213 dma_fence_put(fence);
14214 }
14215 } else {
14216 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14217 }
14218
14219 /*
14220 * We declare pageflips to be interactive, so they merit a small bias
14221 * towards upclocking to deliver the frame on time. By only changing
14222 * the RPS thresholds to sample more regularly and aim for higher
14223 * clocks, we can hopefully deliver low power workloads (like kodi)
14224 * that are not quite steady state without resorting to forcing
14225 * maximum clocks following a vblank miss (see do_rps_boost()).
14226 */
14227 if (!intel_state->rps_interactive) {
14228 intel_rps_mark_interactive(dev_priv, true);
14229 intel_state->rps_interactive = true;
14230 }
14231
14232 return 0;
14233 }
14234
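/*
 * Editorial note: the fencing logic in intel_prepare_plane_fb() above arms
 * the commit against exactly one completion source: the explicit dma_fence
 * userspace attached to the atomic request, or, failing that, the implicit
 * fences tracked in the GEM object's reservation object. Reduced to its
 * shape (await_dma_fence, await_reservation and boost_after_vblank are
 * hypothetical stand-ins for the i915_sw_fence and RPS helpers used in the
 * function):
 */
#if 0
if (new_state->fence) {			/* explicit fencing */
	await_dma_fence(new_state->fence);
	boost_after_vblank(new_state->crtc, new_state->fence);
} else {				/* implicit fencing */
	struct dma_fence *excl;

	await_reservation(obj->resv);
	excl = reservation_object_get_excl_rcu(obj->resv);
	if (excl) {
		boost_after_vblank(new_state->crtc, excl);
		dma_fence_put(excl);
	}
}
#endif
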
14235 /**
14236 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14237 * @plane: drm plane to clean up for
14238 * @old_state: the state from the previous modeset
14239 *
14240 * Cleans up a framebuffer that has just been removed from a plane.
14241 *
14242 * Must be called with struct_mutex held.
14243 */
14244 void
14245 intel_cleanup_plane_fb(struct drm_plane *plane,
14246 struct drm_plane_state *old_state)
14247 {
14248 struct intel_atomic_state *intel_state =
14249 to_intel_atomic_state(old_state->state);
14250 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14251
14252 if (intel_state->rps_interactive) {
14253 intel_rps_mark_interactive(dev_priv, false);
14254 intel_state->rps_interactive = false;
14255 }
14256
14257 /* Should only be called after a successful intel_prepare_plane_fb()! */
14258 mutex_lock(&dev_priv->drm.struct_mutex);
14259 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14260 mutex_unlock(&dev_priv->drm.struct_mutex);
14261 }
14262
14263 int
14264 skl_max_scale(const struct intel_crtc_state *crtc_state,
14265 u32 pixel_format)
14266 {
14267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14269 int max_scale, mult;
14270 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14271
14272 if (!crtc_state->base.enable)
14273 return DRM_PLANE_HELPER_NO_SCALING;
14274
14275 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14276 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14277
14278 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14279 max_dotclk *= 2;
14280
14281 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14282 return DRM_PLANE_HELPER_NO_SCALING;
14283
14284 /*
14285 * The skl max scale is the lower of:
14286 * - just under 3.0 in 16.16 fixed point (the -1 keeps it
14287 *   strictly below 3), or
14288 * - cdclk / crtc_clock.
14289 */
14290 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14291 tmpclk1 = (1 << 16) * mult - 1;
14292 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14293 max_scale = min(tmpclk1, tmpclk2);
14294
14295 return max_scale;
14296 }
14297
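/*
 * Editorial worked example (hypothetical clocks): for an RGB plane
 * (mult = 3) with crtc_clock = 148500 and max_dotclk = 540000:
 *
 *   tmpclk1 = (1 << 16) * 3 - 1               = 196607  (~2.99998 in 16.16)
 *   tmpclk2 = (1 << 8) * ((540000 << 8) / 148500)
 *           = 256 * 930                       = 238080  (~3.63 in 16.16)
 *
 * min() picks 196607, i.e. the "just under 3x" hardware limit wins; only
 * when cdclk/crtc_clock drops below 3 does the clock ratio cap the
 * downscale factor instead.
 */
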
14298 static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14299 struct intel_crtc *crtc)
14300 {
14301 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14302 struct intel_crtc_state *old_crtc_state =
14303 intel_atomic_get_old_crtc_state(state, crtc);
14304 struct intel_crtc_state *new_crtc_state =
14305 intel_atomic_get_new_crtc_state(state, crtc);
14306 bool modeset = needs_modeset(&new_crtc_state->base);
14307
14308 /* Perform vblank evasion around commit operation */
14309 intel_pipe_update_start(new_crtc_state);
14310
14311 if (modeset)
14312 goto out;
14313
14314 if (new_crtc_state->base.color_mgmt_changed ||
14315 new_crtc_state->update_pipe)
14316 intel_color_commit(new_crtc_state);
14317
14318 if (new_crtc_state->update_pipe)
14319 intel_update_pipe_config(old_crtc_state, new_crtc_state);
14320 else if (INTEL_GEN(dev_priv) >= 9)
14321 skl_detach_scalers(new_crtc_state);
14322
14323 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14324 bdw_set_pipemisc(new_crtc_state);
14325
14326 out:
14327 if (dev_priv->display.atomic_update_watermarks)
14328 dev_priv->display.atomic_update_watermarks(state,
14329 new_crtc_state);
14330 }
14331
14332 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14333 struct intel_crtc_state *crtc_state)
14334 {
14335 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14336
14337 if (!IS_GEN(dev_priv, 2))
14338 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14339
14340 if (crtc_state->has_pch_encoder) {
14341 enum pipe pch_transcoder =
14342 intel_crtc_pch_transcoder(crtc);
14343
14344 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14345 }
14346 }
14347
14348 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14349 struct intel_crtc *crtc)
14350 {
14351 struct intel_crtc_state *old_crtc_state =
14352 intel_atomic_get_old_crtc_state(state, crtc);
14353 struct intel_crtc_state *new_crtc_state =
14354 intel_atomic_get_new_crtc_state(state, crtc);
14355
14356 intel_pipe_update_end(new_crtc_state);
14357
14358 if (new_crtc_state->update_pipe &&
14359 !needs_modeset(&new_crtc_state->base) &&
14360 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14361 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14362 }
14363
14364 /**
14365 * intel_plane_destroy - destroy a plane
14366 * @plane: plane to destroy
14367 *
14368 * Common destruction function for all types of planes (primary, cursor,
14369 * sprite).
14370 */
14371 void intel_plane_destroy(struct drm_plane *plane)
14372 {
14373 drm_plane_cleanup(plane);
14374 kfree(to_intel_plane(plane));
14375 }
14376
14377 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14378 u32 format, u64 modifier)
14379 {
14380 switch (modifier) {
14381 case DRM_FORMAT_MOD_LINEAR:
14382 case I915_FORMAT_MOD_X_TILED:
14383 break;
14384 default:
14385 return false;
14386 }
14387
14388 switch (format) {
14389 case DRM_FORMAT_C8:
14390 case DRM_FORMAT_RGB565:
14391 case DRM_FORMAT_XRGB1555:
14392 case DRM_FORMAT_XRGB8888:
14393 return modifier == DRM_FORMAT_MOD_LINEAR ||
14394 modifier == I915_FORMAT_MOD_X_TILED;
14395 default:
14396 return false;
14397 }
14398 }
14399
14400 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14401 u32 format, u64 modifier)
14402 {
14403 switch (modifier) {
14404 case DRM_FORMAT_MOD_LINEAR:
14405 case I915_FORMAT_MOD_X_TILED:
14406 break;
14407 default:
14408 return false;
14409 }
14410
14411 switch (format) {
14412 case DRM_FORMAT_C8:
14413 case DRM_FORMAT_RGB565:
14414 case DRM_FORMAT_XRGB8888:
14415 case DRM_FORMAT_XBGR8888:
14416 case DRM_FORMAT_XRGB2101010:
14417 case DRM_FORMAT_XBGR2101010:
14418 return modifier == DRM_FORMAT_MOD_LINEAR ||
14419 modifier == I915_FORMAT_MOD_X_TILED;
14420 default:
14421 return false;
14422 }
14423 }
14424
14425 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14426 u32 format, u64 modifier)
14427 {
14428 return modifier == DRM_FORMAT_MOD_LINEAR &&
14429 format == DRM_FORMAT_ARGB8888;
14430 }
14431
14432 static const struct drm_plane_funcs i965_plane_funcs = {
14433 .update_plane = drm_atomic_helper_update_plane,
14434 .disable_plane = drm_atomic_helper_disable_plane,
14435 .destroy = intel_plane_destroy,
14436 .atomic_get_property = intel_plane_atomic_get_property,
14437 .atomic_set_property = intel_plane_atomic_set_property,
14438 .atomic_duplicate_state = intel_plane_duplicate_state,
14439 .atomic_destroy_state = intel_plane_destroy_state,
14440 .format_mod_supported = i965_plane_format_mod_supported,
14441 };
14442
14443 static const struct drm_plane_funcs i8xx_plane_funcs = {
14444 .update_plane = drm_atomic_helper_update_plane,
14445 .disable_plane = drm_atomic_helper_disable_plane,
14446 .destroy = intel_plane_destroy,
14447 .atomic_get_property = intel_plane_atomic_get_property,
14448 .atomic_set_property = intel_plane_atomic_set_property,
14449 .atomic_duplicate_state = intel_plane_duplicate_state,
14450 .atomic_destroy_state = intel_plane_destroy_state,
14451 .format_mod_supported = i8xx_plane_format_mod_supported,
14452 };
14453
14454 static int
14455 intel_legacy_cursor_update(struct drm_plane *plane,
14456 struct drm_crtc *crtc,
14457 struct drm_framebuffer *fb,
14458 int crtc_x, int crtc_y,
14459 unsigned int crtc_w, unsigned int crtc_h,
14460 u32 src_x, u32 src_y,
14461 u32 src_w, u32 src_h,
14462 struct drm_modeset_acquire_ctx *ctx)
14463 {
14464 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14465 int ret;
14466 struct drm_plane_state *old_plane_state, *new_plane_state;
14467 struct intel_plane *intel_plane = to_intel_plane(plane);
14468 struct drm_framebuffer *old_fb;
14469 struct intel_crtc_state *crtc_state =
14470 to_intel_crtc_state(crtc->state);
14471 struct intel_crtc_state *new_crtc_state;
14472
14473 /*
14474 * When the crtc is inactive or a modeset is pending,
14475 * take the slowpath and wait for it to complete.
14476 */
14477 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14478 crtc_state->update_pipe)
14479 goto slow;
14480
14481 old_plane_state = plane->state;
14482 /*
14483 * Don't do an async update if there is an outstanding commit modifying
14484 * the plane. This prevents our async update's changes from getting
14485 * overridden by a previous synchronous update's state.
14486 */
14487 if (old_plane_state->commit &&
14488 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14489 goto slow;
14490
14491 /*
14492 * If any parameters change that may affect watermarks,
14493 * take the slowpath. Only changing fb or position should be
14494 * in the fastpath.
14495 */
14496 if (old_plane_state->crtc != crtc ||
14497 old_plane_state->src_w != src_w ||
14498 old_plane_state->src_h != src_h ||
14499 old_plane_state->crtc_w != crtc_w ||
14500 old_plane_state->crtc_h != crtc_h ||
14501 !old_plane_state->fb != !fb)
14502 goto slow;
14503
14504 new_plane_state = intel_plane_duplicate_state(plane);
14505 if (!new_plane_state)
14506 return -ENOMEM;
14507
14508 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14509 if (!new_crtc_state) {
14510 ret = -ENOMEM;
14511 goto out_free;
14512 }
14513
14514 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14515
14516 new_plane_state->src_x = src_x;
14517 new_plane_state->src_y = src_y;
14518 new_plane_state->src_w = src_w;
14519 new_plane_state->src_h = src_h;
14520 new_plane_state->crtc_x = crtc_x;
14521 new_plane_state->crtc_y = crtc_y;
14522 new_plane_state->crtc_w = crtc_w;
14523 new_plane_state->crtc_h = crtc_h;
14524
14525 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14526 to_intel_plane_state(old_plane_state),
14527 to_intel_plane_state(new_plane_state));
14528 if (ret)
14529 goto out_free;
14530
14531 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14532 if (ret)
14533 goto out_free;
14534
14535 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14536 if (ret)
14537 goto out_unlock;
14538
14539 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14540
14541 old_fb = old_plane_state->fb;
14542 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14543 intel_plane->frontbuffer_bit);
14544
14545 /* Swap plane state */
14546 plane->state = new_plane_state;
14547
14548 /*
14549 * We cannot swap crtc_state as it may be in use by an atomic commit or
14550 * page flip that's running simultaneously. If we swap crtc_state and
14551 * destroy the old state, we will cause a use-after-free there.
14552 *
14553 * Only update active_planes, which is needed for our internal
14554 * bookkeeping. Either value will do the right thing when updating
14555 * planes atomically. If the cursor was part of the atomic update then
14556 * we would have taken the slowpath.
14557 */
14558 crtc_state->active_planes = new_crtc_state->active_planes;
14559
14560 if (plane->state->visible)
14561 intel_update_plane(intel_plane, crtc_state,
14562 to_intel_plane_state(plane->state));
14563 else
14564 intel_disable_plane(intel_plane, crtc_state);
14565
14566 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14567
14568 out_unlock:
14569 mutex_unlock(&dev_priv->drm.struct_mutex);
14570 out_free:
14571 if (new_crtc_state)
14572 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14573 if (ret)
14574 intel_plane_destroy_state(plane, new_plane_state);
14575 else
14576 intel_plane_destroy_state(plane, old_plane_state);
14577 return ret;
14578
14579 slow:
14580 return drm_atomic_helper_update_plane(plane, crtc, fb,
14581 crtc_x, crtc_y, crtc_w, crtc_h,
14582 src_x, src_y, src_w, src_h, ctx);
14583 }
14584
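/*
 * Editorial note: the cursor fastpath in intel_legacy_cursor_update()
 * above is only legal when nothing that feeds the watermark or modeset
 * calculations can change. Collapsed into a single predicate for clarity
 * (an illustrative condensation of the checks above, not driver code):
 */
#if 0
bool fastpath =
	crtc_state->base.active &&		/* crtc is running */
	!needs_modeset(&crtc_state->base) &&	/* no modeset pending */
	!crtc_state->update_pipe &&		/* no pipe update pending */
	!(old_plane_state->commit &&		/* no commit still in flight */
	  !try_wait_for_completion(&old_plane_state->commit->hw_done)) &&
	old_plane_state->crtc == crtc &&	/* only fb and position may */
	old_plane_state->src_w == src_w &&	/* differ: any size change */
	old_plane_state->src_h == src_h &&	/* can affect watermarks */
	old_plane_state->crtc_w == crtc_w &&	/* and must take the */
	old_plane_state->crtc_h == crtc_h &&	/* slowpath */
	!old_plane_state->fb == !fb;		/* no show/hide flip */
#endif
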
14585 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14586 .update_plane = intel_legacy_cursor_update,
14587 .disable_plane = drm_atomic_helper_disable_plane,
14588 .destroy = intel_plane_destroy,
14589 .atomic_get_property = intel_plane_atomic_get_property,
14590 .atomic_set_property = intel_plane_atomic_set_property,
14591 .atomic_duplicate_state = intel_plane_duplicate_state,
14592 .atomic_destroy_state = intel_plane_destroy_state,
14593 .format_mod_supported = intel_cursor_format_mod_supported,
14594 };
14595
14596 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14597 enum i9xx_plane_id i9xx_plane)
14598 {
14599 if (!HAS_FBC(dev_priv))
14600 return false;
14601
14602 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14603 return i9xx_plane == PLANE_A; /* tied to pipe A */
14604 else if (IS_IVYBRIDGE(dev_priv))
14605 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14606 i9xx_plane == PLANE_C;
14607 else if (INTEL_GEN(dev_priv) >= 4)
14608 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14609 else
14610 return i9xx_plane == PLANE_A;
14611 }
14612
14613 static struct intel_plane *
14614 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14615 {
14616 struct intel_plane *plane;
14617 const struct drm_plane_funcs *plane_funcs;
14618 unsigned int supported_rotations;
14619 unsigned int possible_crtcs;
14620 const u64 *modifiers;
14621 const u32 *formats;
14622 int num_formats;
14623 int ret;
14624
14625 if (INTEL_GEN(dev_priv) >= 9)
14626 return skl_universal_plane_create(dev_priv, pipe,
14627 PLANE_PRIMARY);
14628
14629 plane = intel_plane_alloc();
14630 if (IS_ERR(plane))
14631 return plane;
14632
14633 plane->pipe = pipe;
14634 /*
14635 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14636 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14637 */
14638 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14639 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14640 else
14641 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14642 plane->id = PLANE_PRIMARY;
14643 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14644
14645 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14646 if (plane->has_fbc) {
14647 struct intel_fbc *fbc = &dev_priv->fbc;
14648
14649 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14650 }
14651
14652 if (INTEL_GEN(dev_priv) >= 4) {
14653 formats = i965_primary_formats;
14654 num_formats = ARRAY_SIZE(i965_primary_formats);
14655 modifiers = i9xx_format_modifiers;
14656
14657 plane->max_stride = i9xx_plane_max_stride;
14658 plane->update_plane = i9xx_update_plane;
14659 plane->disable_plane = i9xx_disable_plane;
14660 plane->get_hw_state = i9xx_plane_get_hw_state;
14661 plane->check_plane = i9xx_plane_check;
14662
14663 plane_funcs = &i965_plane_funcs;
14664 } else {
14665 formats = i8xx_primary_formats;
14666 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14667 modifiers = i9xx_format_modifiers;
14668
14669 plane->max_stride = i9xx_plane_max_stride;
14670 plane->update_plane = i9xx_update_plane;
14671 plane->disable_plane = i9xx_disable_plane;
14672 plane->get_hw_state = i9xx_plane_get_hw_state;
14673 plane->check_plane = i9xx_plane_check;
14674
14675 plane_funcs = &i8xx_plane_funcs;
14676 }
14677
14678 possible_crtcs = BIT(pipe);
14679
14680 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14681 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14682 possible_crtcs, plane_funcs,
14683 formats, num_formats, modifiers,
14684 DRM_PLANE_TYPE_PRIMARY,
14685 "primary %c", pipe_name(pipe));
14686 else
14687 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14688 possible_crtcs, plane_funcs,
14689 formats, num_formats, modifiers,
14690 DRM_PLANE_TYPE_PRIMARY,
14691 "plane %c",
14692 plane_name(plane->i9xx_plane));
14693 if (ret)
14694 goto fail;
14695
14696 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14697 supported_rotations =
14698 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14699 DRM_MODE_REFLECT_X;
14700 } else if (INTEL_GEN(dev_priv) >= 4) {
14701 supported_rotations =
14702 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14703 } else {
14704 supported_rotations = DRM_MODE_ROTATE_0;
14705 }
14706
14707 if (INTEL_GEN(dev_priv) >= 4)
14708 drm_plane_create_rotation_property(&plane->base,
14709 DRM_MODE_ROTATE_0,
14710 supported_rotations);
14711
14712 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14713
14714 return plane;
14715
14716 fail:
14717 intel_plane_free(plane);
14718
14719 return ERR_PTR(ret);
14720 }
14721
14722 static struct intel_plane *
14723 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14724 enum pipe pipe)
14725 {
14726 unsigned int possible_crtcs;
14727 struct intel_plane *cursor;
14728 int ret;
14729
14730 cursor = intel_plane_alloc();
14731 if (IS_ERR(cursor))
14732 return cursor;
14733
14734 cursor->pipe = pipe;
14735 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14736 cursor->id = PLANE_CURSOR;
14737 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14738
14739 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14740 cursor->max_stride = i845_cursor_max_stride;
14741 cursor->update_plane = i845_update_cursor;
14742 cursor->disable_plane = i845_disable_cursor;
14743 cursor->get_hw_state = i845_cursor_get_hw_state;
14744 cursor->check_plane = i845_check_cursor;
14745 } else {
14746 cursor->max_stride = i9xx_cursor_max_stride;
14747 cursor->update_plane = i9xx_update_cursor;
14748 cursor->disable_plane = i9xx_disable_cursor;
14749 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14750 cursor->check_plane = i9xx_check_cursor;
14751 }
14752
14753 cursor->cursor.base = ~0;
14754 cursor->cursor.cntl = ~0;
14755
14756 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14757 cursor->cursor.size = ~0;
14758
14759 possible_crtcs = BIT(pipe);
14760
14761 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14762 possible_crtcs, &intel_cursor_plane_funcs,
14763 intel_cursor_formats,
14764 ARRAY_SIZE(intel_cursor_formats),
14765 cursor_format_modifiers,
14766 DRM_PLANE_TYPE_CURSOR,
14767 "cursor %c", pipe_name(pipe));
14768 if (ret)
14769 goto fail;
14770
14771 if (INTEL_GEN(dev_priv) >= 4)
14772 drm_plane_create_rotation_property(&cursor->base,
14773 DRM_MODE_ROTATE_0,
14774 DRM_MODE_ROTATE_0 |
14775 DRM_MODE_ROTATE_180);
14776
14777 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14778
14779 return cursor;
14780
14781 fail:
14782 intel_plane_free(cursor);
14783
14784 return ERR_PTR(ret);
14785 }
14786
14787 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14788 struct intel_crtc_state *crtc_state)
14789 {
14790 struct intel_crtc_scaler_state *scaler_state =
14791 &crtc_state->scaler_state;
14792 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14793 int i;
14794
14795 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14796 if (!crtc->num_scalers)
14797 return;
14798
14799 for (i = 0; i < crtc->num_scalers; i++) {
14800 struct intel_scaler *scaler = &scaler_state->scalers[i];
14801
14802 scaler->in_use = 0;
14803 scaler->mode = 0;
14804 }
14805
14806 scaler_state->scaler_id = -1;
14807 }
14808
14809 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14810 {
14811 struct intel_crtc *intel_crtc;
14812 struct intel_crtc_state *crtc_state = NULL;
14813 struct intel_plane *primary = NULL;
14814 struct intel_plane *cursor = NULL;
14815 int sprite, ret;
14816
14817 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14818 if (!intel_crtc)
14819 return -ENOMEM;
14820
14821 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14822 if (!crtc_state) {
14823 ret = -ENOMEM;
14824 goto fail;
14825 }
14826 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
14827 intel_crtc->config = crtc_state;
14828
14829 primary = intel_primary_plane_create(dev_priv, pipe);
14830 if (IS_ERR(primary)) {
14831 ret = PTR_ERR(primary);
14832 goto fail;
14833 }
14834 intel_crtc->plane_ids_mask |= BIT(primary->id);
14835
14836 for_each_sprite(dev_priv, pipe, sprite) {
14837 struct intel_plane *plane;
14838
14839 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14840 if (IS_ERR(plane)) {
14841 ret = PTR_ERR(plane);
14842 goto fail;
14843 }
14844 intel_crtc->plane_ids_mask |= BIT(plane->id);
14845 }
14846
14847 cursor = intel_cursor_plane_create(dev_priv, pipe);
14848 if (IS_ERR(cursor)) {
14849 ret = PTR_ERR(cursor);
14850 goto fail;
14851 }
14852 intel_crtc->plane_ids_mask |= BIT(cursor->id);
14853
14854 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14855 &primary->base, &cursor->base,
14856 &intel_crtc_funcs,
14857 "pipe %c", pipe_name(pipe));
14858 if (ret)
14859 goto fail;
14860
14861 intel_crtc->pipe = pipe;
14862
14863 /* initialize shared scalers */
14864 intel_crtc_init_scalers(intel_crtc, crtc_state);
14865
14866 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14867 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14868 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14869
14870 if (INTEL_GEN(dev_priv) < 9) {
14871 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14872
14873 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14874 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14875 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14876 }
14877
14878 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14879
14880 intel_color_init(intel_crtc);
14881
14882 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14883
14884 return 0;
14885
14886 fail:
14887 /*
14888 * drm_mode_config_cleanup() will free up any
14889 * crtcs/planes already initialized.
14890 */
14891 kfree(crtc_state);
14892 kfree(intel_crtc);
14893
14894 return ret;
14895 }
14896
14897 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14898 struct drm_file *file)
14899 {
14900 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14901 struct drm_crtc *drmmode_crtc;
14902 struct intel_crtc *crtc;
14903
14904 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14905 if (!drmmode_crtc)
14906 return -ENOENT;
14907
14908 crtc = to_intel_crtc(drmmode_crtc);
14909 pipe_from_crtc_id->pipe = crtc->pipe;
14910
14911 return 0;
14912 }
14913
14914 static int intel_encoder_clones(struct intel_encoder *encoder)
14915 {
14916 struct drm_device *dev = encoder->base.dev;
14917 struct intel_encoder *source_encoder;
14918 int index_mask = 0;
14919 int entry = 0;
14920
14921 for_each_intel_encoder(dev, source_encoder) {
14922 if (encoders_cloneable(encoder, source_encoder))
14923 index_mask |= (1 << entry);
14924
14925 entry++;
14926 }
14927
14928 return index_mask;
14929 }
14930
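/*
 * Editorial worked example (hypothetical encoders): possible_clones is a
 * bitmask indexed by encoder registration order. With three encoders e0,
 * e1, e2 registered in that order, where only e0 and e1 are mutually
 * cloneable (and assuming encoders_cloneable(), not shown here, also
 * holds for an encoder paired with itself), intel_encoder_clones() yields:
 *
 *   e0 -> 0b011, e1 -> 0b011, e2 -> 0b100
 *
 * i.e. each encoder's mask names every encoder it may share a crtc with.
 */
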
14931 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14932 {
14933 if (!IS_MOBILE(dev_priv))
14934 return false;
14935
14936 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14937 return false;
14938
14939 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14940 return false;
14941
14942 return true;
14943 }
14944
14945 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14946 {
14947 if (INTEL_GEN(dev_priv) >= 9)
14948 return false;
14949
14950 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14951 return false;
14952
14953 if (HAS_PCH_LPT_H(dev_priv) &&
14954 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14955 return false;
14956
14957 /* DDI E can't be used if DDI A requires 4 lanes */
14958 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14959 return false;
14960
14961 if (!dev_priv->vbt.int_crt_support)
14962 return false;
14963
14964 return true;
14965 }
14966
14967 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14968 {
14969 int pps_num;
14970 int pps_idx;
14971
14972 if (HAS_DDI(dev_priv))
14973 return;
14974 /*
14975 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14976 * everywhere registers can be write-protected.
14977 */
14978 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14979 pps_num = 2;
14980 else
14981 pps_num = 1;
14982
14983 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14984 u32 val = I915_READ(PP_CONTROL(pps_idx));
14985
14986 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14987 I915_WRITE(PP_CONTROL(pps_idx), val);
14988 }
14989 }
14990
14991 static void intel_pps_init(struct drm_i915_private *dev_priv)
14992 {
14993 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14994 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14995 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14996 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14997 else
14998 dev_priv->pps_mmio_base = PPS_BASE;
14999
15000 intel_pps_unlock_regs_wa(dev_priv);
15001 }
15002
15003 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
15004 {
15005 struct intel_encoder *encoder;
15006 bool dpd_is_edp = false;
15007
15008 intel_pps_init(dev_priv);
15009
15010 if (!HAS_DISPLAY(dev_priv))
15011 return;
15012
15013 if (IS_ELKHARTLAKE(dev_priv)) {
15014 intel_ddi_init(dev_priv, PORT_A);
15015 intel_ddi_init(dev_priv, PORT_B);
15016 intel_ddi_init(dev_priv, PORT_C);
15017 icl_dsi_init(dev_priv);
15018 } else if (INTEL_GEN(dev_priv) >= 11) {
15019 intel_ddi_init(dev_priv, PORT_A);
15020 intel_ddi_init(dev_priv, PORT_B);
15021 intel_ddi_init(dev_priv, PORT_C);
15022 intel_ddi_init(dev_priv, PORT_D);
15023 intel_ddi_init(dev_priv, PORT_E);
15024 /*
15025 * On some ICL SKUs port F is not present. No strap bits for
15026 * this, so rely on VBT.
15027 * Work around broken VBTs on SKUs known to have no port F.
15028 */
15029 if (IS_ICL_WITH_PORT_F(dev_priv) &&
15030 intel_bios_is_port_present(dev_priv, PORT_F))
15031 intel_ddi_init(dev_priv, PORT_F);
15032
15033 icl_dsi_init(dev_priv);
15034 } else if (IS_GEN9_LP(dev_priv)) {
15035 /*
15036 * FIXME: Broxton doesn't support port detection via the
15037 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
15038 * detect the ports.
15039 */
15040 intel_ddi_init(dev_priv, PORT_A);
15041 intel_ddi_init(dev_priv, PORT_B);
15042 intel_ddi_init(dev_priv, PORT_C);
15043
15044 vlv_dsi_init(dev_priv);
15045 } else if (HAS_DDI(dev_priv)) {
15046 int found;
15047
15048 if (intel_ddi_crt_present(dev_priv))
15049 intel_crt_init(dev_priv);
15050
15051 /*
15052 * Haswell uses DDI functions to detect digital outputs.
15053 * On SKL pre-D0 the strap isn't connected, so we assume
15054 * it's there.
15055 */
15056 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
15057 /* WaIgnoreDDIAStrap: skl */
15058 if (found || IS_GEN9_BC(dev_priv))
15059 intel_ddi_init(dev_priv, PORT_A);
15060
15061 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
15062 * register */
15063 found = I915_READ(SFUSE_STRAP);
15064
15065 if (found & SFUSE_STRAP_DDIB_DETECTED)
15066 intel_ddi_init(dev_priv, PORT_B);
15067 if (found & SFUSE_STRAP_DDIC_DETECTED)
15068 intel_ddi_init(dev_priv, PORT_C);
15069 if (found & SFUSE_STRAP_DDID_DETECTED)
15070 intel_ddi_init(dev_priv, PORT_D);
15071 if (found & SFUSE_STRAP_DDIF_DETECTED)
15072 intel_ddi_init(dev_priv, PORT_F);
15073 /*
15074 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
15075 */
15076 if (IS_GEN9_BC(dev_priv) &&
15077 intel_bios_is_port_present(dev_priv, PORT_E))
15078 intel_ddi_init(dev_priv, PORT_E);
15079
15080 } else if (HAS_PCH_SPLIT(dev_priv)) {
15081 int found;
15082
15083 /*
15084 * intel_edp_init_connector() depends on this completing first,
15085 * to prevent the registration of both eDP and LVDS and the
15086 * incorrect sharing of the PPS.
15087 */
15088 intel_lvds_init(dev_priv);
15089 intel_crt_init(dev_priv);
15090
15091 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
15092
15093 if (ilk_has_edp_a(dev_priv))
15094 intel_dp_init(dev_priv, DP_A, PORT_A);
15095
15096 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
15097 /* PCH SDVOB is multiplexed with HDMIB */
15098 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
15099 if (!found)
15100 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
15101 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
15102 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
15103 }
15104
15105 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
15106 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
15107
15108 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
15109 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
15110
15111 if (I915_READ(PCH_DP_C) & DP_DETECTED)
15112 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
15113
15114 if (I915_READ(PCH_DP_D) & DP_DETECTED)
15115 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
15116 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15117 bool has_edp, has_port;
15118
15119 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
15120 intel_crt_init(dev_priv);
15121
15122 /*
15123 * The DP_DETECTED bit is the latched state of the DDC
15124 * SDA pin at boot. However, since eDP doesn't require DDC
15125 * (no way to plug in a DP->HDMI dongle) the DDC pins for
15126 * eDP ports may have been muxed to an alternate function.
15127 * Thus we can't rely on the DP_DETECTED bit alone to detect
15128 * eDP ports. Consult the VBT as well as DP_DETECTED to
15129 * detect eDP ports.
15130 *
15131 * Sadly the straps seem to be missing sometimes even for HDMI
15132 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
15133 * and the VBT for the presence of the port. Additionally we can't
15134 * trust the port type the VBT declares, as we've seen at least
15135 * HDMI ports that the VBT claims are DP or eDP.
15136 */
15137 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
15138 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
15139 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
15140 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
15141 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
15142 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
15143
15144 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
15145 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
15146 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
15147 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
15148 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
15149 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
15150
15151 if (IS_CHERRYVIEW(dev_priv)) {
15152 /*
15153 * eDP not supported on port D,
15154 * so no need to worry about it
15155 */
15156 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
15157 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
15158 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
15159 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
15160 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
15161 }
15162
15163 vlv_dsi_init(dev_priv);
15164 } else if (IS_PINEVIEW(dev_priv)) {
15165 intel_lvds_init(dev_priv);
15166 intel_crt_init(dev_priv);
15167 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
15168 bool found = false;
15169
15170 if (IS_MOBILE(dev_priv))
15171 intel_lvds_init(dev_priv);
15172
15173 intel_crt_init(dev_priv);
15174
15175 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15176 DRM_DEBUG_KMS("probing SDVOB\n");
15177 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
15178 if (!found && IS_G4X(dev_priv)) {
15179 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
15180 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
15181 }
15182
15183 if (!found && IS_G4X(dev_priv))
15184 intel_dp_init(dev_priv, DP_B, PORT_B);
15185 }
15186
15187 /* Before G4X, SDVOC doesn't have its own detect register; SDVOB's bit is checked instead */
15188
15189 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15190 DRM_DEBUG_KMS("probing SDVOC\n");
15191 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
15192 }
15193
15194 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
15195
15196 if (IS_G4X(dev_priv)) {
15197 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
15198 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
15199 }
15200 if (IS_G4X(dev_priv))
15201 intel_dp_init(dev_priv, DP_C, PORT_C);
15202 }
15203
15204 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
15205 intel_dp_init(dev_priv, DP_D, PORT_D);
15206
15207 if (SUPPORTS_TV(dev_priv))
15208 intel_tv_init(dev_priv);
15209 } else if (IS_GEN(dev_priv, 2)) {
15210 if (IS_I85X(dev_priv))
15211 intel_lvds_init(dev_priv);
15212
15213 intel_crt_init(dev_priv);
15214 intel_dvo_init(dev_priv);
15215 }
15216
15217 intel_psr_init(dev_priv);
15218
15219 for_each_intel_encoder(&dev_priv->drm, encoder) {
15220 encoder->base.possible_crtcs = encoder->crtc_mask;
15221 encoder->base.possible_clones =
15222 intel_encoder_clones(encoder);
15223 }
15224
15225 intel_init_pch_refclk(dev_priv);
15226
15227 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
15228 }
15229
15230 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15231 {
15232 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15233 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15234
15235 drm_framebuffer_cleanup(fb);
15236
15237 i915_gem_object_lock(obj);
15238 WARN_ON(!obj->framebuffer_references--);
15239 i915_gem_object_unlock(obj);
15240
15241 i915_gem_object_put(obj);
15242
15243 kfree(intel_fb);
15244 }
15245
15246 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15247 struct drm_file *file,
15248 unsigned int *handle)
15249 {
15250 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15251
15252 if (obj->userptr.mm) {
15253 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15254 return -EINVAL;
15255 }
15256
15257 return drm_gem_handle_create(file, &obj->base, handle);
15258 }
15259
15260 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15261 struct drm_file *file,
15262 unsigned flags, unsigned color,
15263 struct drm_clip_rect *clips,
15264 unsigned num_clips)
15265 {
15266 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15267
15268 i915_gem_object_flush_if_display(obj);
15269 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15270
15271 return 0;
15272 }
15273
15274 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15275 .destroy = intel_user_framebuffer_destroy,
15276 .create_handle = intel_user_framebuffer_create_handle,
15277 .dirty = intel_user_framebuffer_dirty,
15278 };
15279
15280 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15281 struct drm_i915_gem_object *obj,
15282 struct drm_mode_fb_cmd2 *mode_cmd)
15283 {
15284 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15285 struct drm_framebuffer *fb = &intel_fb->base;
15286 u32 max_stride;
15287 unsigned int tiling, stride;
15288 int ret = -EINVAL;
15289 int i;
15290
15291 i915_gem_object_lock(obj);
15292 obj->framebuffer_references++;
15293 tiling = i915_gem_object_get_tiling(obj);
15294 stride = i915_gem_object_get_stride(obj);
15295 i915_gem_object_unlock(obj);
15296
15297 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15298 /*
15299 * If there's a fence, enforce that
15300 * the fb modifier and tiling mode match.
15301 */
15302 if (tiling != I915_TILING_NONE &&
15303 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15304 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15305 goto err;
15306 }
15307 } else {
15308 if (tiling == I915_TILING_X) {
15309 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15310 } else if (tiling == I915_TILING_Y) {
15311 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15312 goto err;
15313 }
15314 }
15315
15316 if (!drm_any_plane_has_format(&dev_priv->drm,
15317 mode_cmd->pixel_format,
15318 mode_cmd->modifier[0])) {
15319 struct drm_format_name_buf format_name;
15320
15321 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15322 drm_get_format_name(mode_cmd->pixel_format,
15323 &format_name),
15324 mode_cmd->modifier[0]);
15325 goto err;
15326 }
15327
15328 /*
15329 * The gen2/3 display engine uses the fence if present,
15330 * so the tiling mode must match the fb modifier exactly.
15331 */
15332 if (INTEL_GEN(dev_priv) < 4 &&
15333 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15334 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15335 goto err;
15336 }
15337
15338 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
15339 mode_cmd->modifier[0]);
15340 if (mode_cmd->pitches[0] > max_stride) {
15341 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15342 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15343 "tiled" : "linear",
15344 mode_cmd->pitches[0], max_stride);
15345 goto err;
15346 }
15347
15348 /*
15349 * If there's a fence, enforce that
15350 * the fb pitch and fence stride match.
15351 */
15352 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15353 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15354 mode_cmd->pitches[0], stride);
15355 goto err;
15356 }
15357
15358 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15359 if (mode_cmd->offsets[0] != 0)
15360 goto err;
15361
15362 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15363
15364 for (i = 0; i < fb->format->num_planes; i++) {
15365 u32 stride_alignment;
15366
15367 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15368 DRM_DEBUG_KMS("bad plane %d handle\n", i);
15369 goto err;
15370 }
15371
15372 stride_alignment = intel_fb_stride_alignment(fb, i);
15373
15374 /*
15375 * Display WA #0531: skl,bxt,kbl,glk
15376 *
15377 * Render decompression and plane width > 3840
15378 * combined with horizontal panning requires the
15379 * plane stride to be a multiple of 4. We'll just
15380 * require the entire fb to accommodate that to avoid
15381 * potential runtime errors at plane configuration time.
15382 */
15383 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15384 is_ccs_modifier(fb->modifier))
15385 stride_alignment *= 4;
15386
15387 if (fb->pitches[i] & (stride_alignment - 1)) {
15388 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15389 i, fb->pitches[i], stride_alignment);
15390 goto err;
15391 }
15392
15393 fb->obj[i] = &obj->base;
15394 }
15395
15396 ret = intel_fill_fb_info(dev_priv, fb);
15397 if (ret)
15398 goto err;
15399
15400 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15401 if (ret) {
15402 DRM_ERROR("framebuffer init failed %d\n", ret);
15403 goto err;
15404 }
15405
15406 return 0;
15407
15408 err:
15409 i915_gem_object_lock(obj);
15410 obj->framebuffer_references--;
15411 i915_gem_object_unlock(obj);
15412 return ret;
15413 }
15414
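/*
 * Editorial worked example (hypothetical values): with a base
 * stride_alignment of 64 bytes, the power-of-two mask test above
 * (pitch & (alignment - 1)) accepts pitches[0] = 4096 (4096 & 63 == 0)
 * and rejects 4100 (4100 & 63 == 4). When Display WA #0531 applies
 * (gen9, plane 0, width > 3840, CCS modifier), the effective alignment
 * becomes 256, so 4160 (a multiple of 64 but not of 256) would then be
 * rejected as well.
 */
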
15415 static struct drm_framebuffer *
15416 intel_user_framebuffer_create(struct drm_device *dev,
15417 struct drm_file *filp,
15418 const struct drm_mode_fb_cmd2 *user_mode_cmd)
15419 {
15420 struct drm_framebuffer *fb;
15421 struct drm_i915_gem_object *obj;
15422 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15423
15424 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15425 if (!obj)
15426 return ERR_PTR(-ENOENT);
15427
15428 fb = intel_framebuffer_create(obj, &mode_cmd);
15429 if (IS_ERR(fb))
15430 i915_gem_object_put(obj);
15431
15432 return fb;
15433 }
15434
15435 static void intel_atomic_state_free(struct drm_atomic_state *state)
15436 {
15437 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15438
15439 drm_atomic_state_default_release(state);
15440
15441 i915_sw_fence_fini(&intel_state->commit_ready);
15442
15443 kfree(state);
15444 }
15445
15446 static enum drm_mode_status
15447 intel_mode_valid(struct drm_device *dev,
15448 const struct drm_display_mode *mode)
15449 {
15450 struct drm_i915_private *dev_priv = to_i915(dev);
15451 int hdisplay_max, htotal_max;
15452 int vdisplay_max, vtotal_max;
15453
15454 /*
15455 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15456 * of DBLSCAN modes to the output's mode list when they detect
15457 * the scaling mode property on the connector. And they don't
15458 * ask the kernel to validate those modes in any way until
15459 * modeset time at which point the client gets a protocol error.
15460 * So in order to not upset those clients we silently ignore the
15461 * DBLSCAN flag on such connectors. For other connectors we will
15462 * reject modes with the DBLSCAN flag in encoder->compute_config().
15463 * And we always reject DBLSCAN modes in connector->mode_valid()
15464 * as we never want such modes on the connector's mode list.
15465 */
15466
15467 if (mode->vscan > 1)
15468 return MODE_NO_VSCAN;
15469
15470 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15471 return MODE_H_ILLEGAL;
15472
15473 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15474 DRM_MODE_FLAG_NCSYNC |
15475 DRM_MODE_FLAG_PCSYNC))
15476 return MODE_HSYNC;
15477
15478 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15479 DRM_MODE_FLAG_PIXMUX |
15480 DRM_MODE_FLAG_CLKDIV2))
15481 return MODE_BAD;
15482
15483 if (INTEL_GEN(dev_priv) >= 9 ||
15484 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15485 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15486 vdisplay_max = 4096;
15487 htotal_max = 8192;
15488 vtotal_max = 8192;
15489 } else if (INTEL_GEN(dev_priv) >= 3) {
15490 hdisplay_max = 4096;
15491 vdisplay_max = 4096;
15492 htotal_max = 8192;
15493 vtotal_max = 8192;
15494 } else {
15495 hdisplay_max = 2048;
15496 vdisplay_max = 2048;
15497 htotal_max = 4096;
15498 vtotal_max = 4096;
15499 }
15500
15501 if (mode->hdisplay > hdisplay_max ||
15502 mode->hsync_start > htotal_max ||
15503 mode->hsync_end > htotal_max ||
15504 mode->htotal > htotal_max)
15505 return MODE_H_ILLEGAL;
15506
15507 if (mode->vdisplay > vdisplay_max ||
15508 mode->vsync_start > vtotal_max ||
15509 mode->vsync_end > vtotal_max ||
15510 mode->vtotal > vtotal_max)
15511 return MODE_V_ILLEGAL;
15512
15513 return MODE_OK;
15514 }
15515
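/*
 * Editorial worked example (hypothetical modes): a 4096x2160 mode passes
 * the gen9/HSW/BDW limits above (4096 <= 8192 horizontally, 2160 <= 4096
 * vertically), still passes on gen3 (4096 <= 4096), but on gen2 fails the
 * 2048 hdisplay_max check and is rejected with MODE_H_ILLEGAL before the
 * vertical limits are even consulted.
 */
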
15516 static const struct drm_mode_config_funcs intel_mode_funcs = {
15517 .fb_create = intel_user_framebuffer_create,
15518 .get_format_info = intel_get_format_info,
15519 .output_poll_changed = intel_fbdev_output_poll_changed,
15520 .mode_valid = intel_mode_valid,
15521 .atomic_check = intel_atomic_check,
15522 .atomic_commit = intel_atomic_commit,
15523 .atomic_state_alloc = intel_atomic_state_alloc,
15524 .atomic_state_clear = intel_atomic_state_clear,
15525 .atomic_state_free = intel_atomic_state_free,
15526 };
15527
15528 /**
15529 * intel_init_display_hooks - initialize the display modesetting hooks
15530 * @dev_priv: device private
15531 */
15532 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15533 {
15534 intel_init_cdclk_hooks(dev_priv);
15535
15536 if (INTEL_GEN(dev_priv) >= 9) {
15537 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15538 dev_priv->display.get_initial_plane_config =
15539 skylake_get_initial_plane_config;
15540 dev_priv->display.crtc_compute_clock =
15541 haswell_crtc_compute_clock;
15542 dev_priv->display.crtc_enable = haswell_crtc_enable;
15543 dev_priv->display.crtc_disable = haswell_crtc_disable;
15544 } else if (HAS_DDI(dev_priv)) {
15545 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15546 dev_priv->display.get_initial_plane_config =
15547 i9xx_get_initial_plane_config;
15548 dev_priv->display.crtc_compute_clock =
15549 haswell_crtc_compute_clock;
15550 dev_priv->display.crtc_enable = haswell_crtc_enable;
15551 dev_priv->display.crtc_disable = haswell_crtc_disable;
15552 } else if (HAS_PCH_SPLIT(dev_priv)) {
15553 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15554 dev_priv->display.get_initial_plane_config =
15555 i9xx_get_initial_plane_config;
15556 dev_priv->display.crtc_compute_clock =
15557 ironlake_crtc_compute_clock;
15558 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15559 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15560 } else if (IS_CHERRYVIEW(dev_priv)) {
15561 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15562 dev_priv->display.get_initial_plane_config =
15563 i9xx_get_initial_plane_config;
15564 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15565 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15566 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15567 } else if (IS_VALLEYVIEW(dev_priv)) {
15568 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15569 dev_priv->display.get_initial_plane_config =
15570 i9xx_get_initial_plane_config;
15571 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15572 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15573 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15574 } else if (IS_G4X(dev_priv)) {
15575 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15576 dev_priv->display.get_initial_plane_config =
15577 i9xx_get_initial_plane_config;
15578 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15579 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15580 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15581 } else if (IS_PINEVIEW(dev_priv)) {
15582 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15583 dev_priv->display.get_initial_plane_config =
15584 i9xx_get_initial_plane_config;
15585 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15586 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15587 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15588 } else if (!IS_GEN(dev_priv, 2)) {
15589 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15590 dev_priv->display.get_initial_plane_config =
15591 i9xx_get_initial_plane_config;
15592 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15593 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15594 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15595 } else {
15596 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15597 dev_priv->display.get_initial_plane_config =
15598 i9xx_get_initial_plane_config;
15599 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15600 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15601 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15602 }
15603
15604 if (IS_GEN(dev_priv, 5)) {
15605 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15606 } else if (IS_GEN(dev_priv, 6)) {
15607 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15608 } else if (IS_IVYBRIDGE(dev_priv)) {
15609 /* FIXME: detect B0+ stepping and use auto training */
15610 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15611 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15612 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15613 }
15614
15615 if (INTEL_GEN(dev_priv) >= 9)
15616 dev_priv->display.update_crtcs = skl_update_crtcs;
15617 else
15618 dev_priv->display.update_crtcs = intel_update_crtcs;
15619 }
15620
15621 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15622 {
15623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15624 return VLV_VGACNTRL;
15625 else if (INTEL_GEN(dev_priv) >= 5)
15626 return CPU_VGACNTRL;
15627 else
15628 return VGACNTRL;
15629 }
15630
15631 /* Disable the VGA plane that we never use */
15632 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15633 {
15634 struct pci_dev *pdev = dev_priv->drm.pdev;
15635 u8 sr1;
15636 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15637
15638 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15639 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
15640 outb(SR01, VGA_SR_INDEX);
15641 sr1 = inb(VGA_SR_DATA);
15642 outb(sr1 | 1<<5, VGA_SR_DATA);
15643 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15644 udelay(300);
15645
15646 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15647 POSTING_READ(vga_reg);
15648 }
15649
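/*
 * Editorial note: i915_disable_vga() above uses the classic VGA
 * index/data register pair: write the register index to the index port,
 * then read-modify-write the value through the data port. Bit 5 of
 * sequencer register SR01 is the "screen off" bit. The bare pattern
 * (reg_index and bit are placeholders):
 */
#if 0
outb(reg_index, VGA_SR_INDEX);	/* select the sequencer register */
val = inb(VGA_SR_DATA);		/* read its current value */
outb(val | bit, VGA_SR_DATA);	/* write it back with the bit set */
#endif
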
15650 void intel_modeset_init_hw(struct drm_device *dev)
15651 {
15652 struct drm_i915_private *dev_priv = to_i915(dev);
15653
15654 intel_update_cdclk(dev_priv);
15655 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15656 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15657 }
15658
15659 /*
15660 * Calculate what we think the watermarks should be for the state we've read
15661 * out of the hardware and then immediately program those watermarks so that
15662 * we ensure the hardware settings match our internal state.
15663 *
15664 * We can calculate what we think WMs should be by creating a duplicate of the
15665 * current state (which was constructed during hardware readout) and running it
15666 * through the atomic check code to calculate new watermark values in the
15667 * state object.
15668 */
15669 static void sanitize_watermarks(struct drm_device *dev)
15670 {
15671 struct drm_i915_private *dev_priv = to_i915(dev);
15672 struct drm_atomic_state *state;
15673 struct intel_atomic_state *intel_state;
15674 struct drm_crtc *crtc;
15675 struct drm_crtc_state *cstate;
15676 struct drm_modeset_acquire_ctx ctx;
15677 int ret;
15678 int i;
15679
15680 /* Only supported on platforms that use atomic watermark design */
15681 if (!dev_priv->display.optimize_watermarks)
15682 return;
15683
15684 /*
15685 * We need to hold connection_mutex before calling duplicate_state so
15686 * that the connector loop is protected.
15687 */
15688 drm_modeset_acquire_init(&ctx, 0);
15689 retry:
15690 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15691 if (ret == -EDEADLK) {
15692 drm_modeset_backoff(&ctx);
15693 goto retry;
15694 } else if (WARN_ON(ret)) {
15695 goto fail;
15696 }
15697
15698 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15699 if (WARN_ON(IS_ERR(state)))
15700 goto fail;
15701
15702 intel_state = to_intel_atomic_state(state);
15703
15704 /*
15705 * Hardware readout is the only time we don't want to calculate
15706 * intermediate watermarks (since we don't trust the current
15707 * watermarks).
15708 */
15709 if (!HAS_GMCH(dev_priv))
15710 intel_state->skip_intermediate_wm = true;
15711
15712 ret = intel_atomic_check(dev, state);
15713 if (ret) {
15714 /*
15715 * If we fail here, it means that the hardware appears to be
15716 * programmed in a way that shouldn't be possible, given our
15717 * understanding of watermark requirements. This might mean a
15718 * mistake in the hardware readout code or a mistake in the
15719 * watermark calculations for a given platform. Raise a WARN
15720 * so that this is noticeable.
15721 *
15722 * If this actually happens, we'll have to just leave the
15723 * BIOS-programmed watermarks untouched and hope for the best.
15724 */
15725 WARN(true, "Could not determine valid watermarks for inherited state\n");
15726 goto put_state;
15727 }
15728
15729 /* Write calculated watermark values back */
15730 for_each_new_crtc_in_state(state, crtc, cstate, i) {
15731 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15732
15733 cs->wm.need_postvbl_update = true;
15734 dev_priv->display.optimize_watermarks(intel_state, cs);
15735
15736 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15737 }
15738
15739 put_state:
15740 drm_atomic_state_put(state);
15741 fail:
15742 drm_modeset_drop_locks(&ctx);
15743 drm_modeset_acquire_fini(&ctx);
15744 }
15745
15746 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15747 {
15748 if (IS_GEN(dev_priv, 5)) {
15749 u32 fdi_pll_clk =
15750 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15751
15752 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15753 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15754 dev_priv->fdi_pll_freq = 270000;
15755 } else {
15756 return;
15757 }
15758
15759 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15760 }
15761
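/*
 * Editorial worked example (hypothetical register value): per the gen5
 * computation above, the FDI_PLL_FB_CLOCK_MASK field encodes
 * freq/10000 - 2, so a readback of fdi_pll_clk = 25 gives
 * (25 + 2) * 10000 = 270000 (presumably kHz, i.e. 270 MHz), matching the
 * constant used for gen6/IVB above.
 */
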
15762 static int intel_initial_commit(struct drm_device *dev)
15763 {
15764 struct drm_atomic_state *state = NULL;
15765 struct drm_modeset_acquire_ctx ctx;
15766 struct drm_crtc *crtc;
15767 struct drm_crtc_state *crtc_state;
15768 int ret = 0;
15769
15770 state = drm_atomic_state_alloc(dev);
15771 if (!state)
15772 return -ENOMEM;
15773
15774 drm_modeset_acquire_init(&ctx, 0);
15775
15776 retry:
15777 state->acquire_ctx = &ctx;
15778
15779 drm_for_each_crtc(crtc, dev) {
15780 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15781 if (IS_ERR(crtc_state)) {
15782 ret = PTR_ERR(crtc_state);
15783 goto out;
15784 }
15785
15786 if (crtc_state->active) {
15787 ret = drm_atomic_add_affected_planes(state, crtc);
15788 if (ret)
15789 goto out;
15790
15791 /*
15792 * FIXME hack to force a LUT update to avoid the
15793 * plane update forcing the pipe gamma on without
15794 * having a proper LUT loaded. Remove once we
15795 * have readout for pipe gamma enable.
15796 */
15797 crtc_state->color_mgmt_changed = true;
15798 }
15799 }
15800
15801 ret = drm_atomic_commit(state);
15802
15803 out:
15804 if (ret == -EDEADLK) {
15805 drm_atomic_state_clear(state);
15806 drm_modeset_backoff(&ctx);
15807 goto retry;
15808 }
15809
15810 drm_atomic_state_put(state);
15811
15812 drm_modeset_drop_locks(&ctx);
15813 drm_modeset_acquire_fini(&ctx);
15814
15815 return ret;
15816 }
15817
15818 int intel_modeset_init(struct drm_device *dev)
15819 {
15820 struct drm_i915_private *dev_priv = to_i915(dev);
15821 struct i915_ggtt *ggtt = &dev_priv->ggtt;
15822 enum pipe pipe;
15823 struct intel_crtc *crtc;
15824 int ret;
15825
15826 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15827
15828 drm_mode_config_init(dev);
15829
15830 ret = intel_bw_init(dev_priv);
15831 if (ret)
15832 return ret;
15833
15834 dev->mode_config.min_width = 0;
15835 dev->mode_config.min_height = 0;
15836
15837 dev->mode_config.preferred_depth = 24;
15838 dev->mode_config.prefer_shadow = 1;
15839
15840 dev->mode_config.allow_fb_modifiers = true;
15841
15842 dev->mode_config.funcs = &intel_mode_funcs;
15843
15844 init_llist_head(&dev_priv->atomic_helper.free_list);
15845 INIT_WORK(&dev_priv->atomic_helper.free_work,
15846 intel_atomic_helper_free_state_worker);
15847
15848 intel_init_quirks(dev_priv);
15849
15850 intel_fbc_init(dev_priv);
15851
15852 intel_init_pm(dev_priv);
15853
15854 /*
15855 * There may be no VBT; and if the BIOS enabled SSC we can
15856 * just keep using it to avoid unnecessary flicker. If the
15857 * BIOS isn't using it, don't assume it will work even if the VBT
15858 * indicates as much.
15859 */
15860 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15861 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15862 DREF_SSC1_ENABLE);
15863
15864 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15865 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15866 bios_lvds_use_ssc ? "en" : "dis",
15867 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15868 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15869 }
15870 }
15871
15872 /*
15873 * Maximum framebuffer dimensions, chosen to match
15874 * the maximum render engine surface size on gen4+.
15875 */
15876 if (INTEL_GEN(dev_priv) >= 7) {
15877 dev->mode_config.max_width = 16384;
15878 dev->mode_config.max_height = 16384;
15879 } else if (INTEL_GEN(dev_priv) >= 4) {
15880 dev->mode_config.max_width = 8192;
15881 dev->mode_config.max_height = 8192;
15882 } else if (IS_GEN(dev_priv, 3)) {
15883 dev->mode_config.max_width = 4096;
15884 dev->mode_config.max_height = 4096;
15885 } else {
15886 dev->mode_config.max_width = 2048;
15887 dev->mode_config.max_height = 2048;
15888 }
15889
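/*
 * 845g supports a 64 pixel wide cursor and 865g a 512 pixel wide one,
 * both up to 1023 lines tall; other gen2 parts have fixed 64x64
 * cursors, and everything newer supports up to 256x256.
 */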
15890 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15891 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15892 dev->mode_config.cursor_height = 1023;
15893 } else if (IS_GEN(dev_priv, 2)) {
15894 dev->mode_config.cursor_width = 64;
15895 dev->mode_config.cursor_height = 64;
15896 } else {
15897 dev->mode_config.cursor_width = 256;
15898 dev->mode_config.cursor_height = 256;
15899 }
15900
15901 dev->mode_config.fb_base = ggtt->gmadr.start;
15902
15903 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15904 INTEL_INFO(dev_priv)->num_pipes,
15905 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15906
15907 for_each_pipe(dev_priv, pipe) {
15908 ret = intel_crtc_init(dev_priv, pipe);
15909 if (ret) {
15910 drm_mode_config_cleanup(dev);
15911 return ret;
15912 }
15913 }
15914
15915 intel_shared_dpll_init(dev);
15916 intel_update_fdi_pll_freq(dev_priv);
15917
15918 intel_update_czclk(dev_priv);
15919 intel_modeset_init_hw(dev);
15920
15921 intel_hdcp_component_init(dev_priv);
15922
15923 if (dev_priv->max_cdclk_freq == 0)
15924 intel_update_max_cdclk(dev_priv);
15925
15926 /* Just disable it once at startup */
15927 i915_disable_vga(dev_priv);
15928 intel_setup_outputs(dev_priv);
15929
15930 drm_modeset_lock_all(dev);
15931 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15932 drm_modeset_unlock_all(dev);
15933
15934 for_each_intel_crtc(dev, crtc) {
15935 struct intel_initial_plane_config plane_config = {};
15936
15937 if (!crtc->active)
15938 continue;
15939
15940 /*
15941 * Note that reserving the BIOS fb up front prevents us
15942 * from stuffing other stolen allocations like the ring
15943 * on top. This prevents some ugliness at boot time, and
15944 * can even allow for smooth boot transitions if the BIOS
15945 * fb is large enough for the active pipe configuration.
15946 */
15947 dev_priv->display.get_initial_plane_config(crtc,
15948 &plane_config);
15949
15950 /*
15951 * If the fb is shared between multiple heads, we'll
15952 * just get the first one.
15953 */
15954 intel_find_initial_plane_obj(crtc, &plane_config);
15955 }
15956
15957 /*
15958 * Make sure hardware watermarks really match the state we read out.
15959 * Note that we need to do this after reconstructing the BIOS fb's
15960 * since the watermark calculation done here will use pstate->fb.
15961 */
15962 if (!HAS_GMCH(dev_priv))
15963 sanitize_watermarks(dev);
15964
15965 /*
15966 * Force all active planes to recompute their states. So that on
15967 * mode_setcrtc after probe, all the intel_plane_state variables
15968 * are already calculated and there is no assert_plane warnings
15969 * during bootup.
15970 */
15971 ret = intel_initial_commit(dev);
15972 if (ret)
15973 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15974
15975 return 0;
15976 }
15977
15978 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15979 {
15980 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15981 /* 640x480@60Hz, ~25175 kHz */
15982 struct dpll clock = {
15983 .m1 = 18,
15984 .m2 = 7,
15985 .p1 = 13,
15986 .p2 = 4,
15987 .n = 2,
15988 };
15989 u32 dpll, fp;
15990 int i;
15991
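/*
 * Sanity check of the divider math: with m = 5 * (m1 + 2) + (m2 + 2) = 109
 * (the usual i9xx feedback divider encoding) and a 48 MHz refclk,
 * vco = 48000 * 109 / (n + 2) = 1308000 kHz, and with p = p1 * p2 = 52
 * the dot clock is 1308000 / 52 ~= 25154 kHz, close enough to the
 * nominal 25175 kHz VGA pixel clock.
 */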
15992 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
15993
15994 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
15995 pipe_name(pipe), clock.vco, clock.dot);
15996
15997 fp = i9xx_dpll_compute_fp(&clock);
15998 dpll = DPLL_DVO_2X_MODE |
15999 DPLL_VGA_MODE_DIS |
16000 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16001 PLL_P2_DIVIDE_BY_4 |
16002 PLL_REF_INPUT_DREFCLK |
16003 DPLL_VCO_ENABLE;
16004
16005 I915_WRITE(FP0(pipe), fp);
16006 I915_WRITE(FP1(pipe), fp);
16007
16008 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16009 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16010 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16011 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16012 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16013 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16014 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
16015
16016 /*
16017 * Apparently we need to have VGA mode enabled prior to changing
16018 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16019 * dividers, even though the register value does change.
16020 */
16021 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16022 I915_WRITE(DPLL(pipe), dpll);
16023
16024 /* Wait for the clocks to stabilize. */
16025 POSTING_READ(DPLL(pipe));
16026 udelay(150);
16027
16028 /* The pixel multiplier can only be updated once the
16029 * DPLL is enabled and the clocks are stable.
16030 *
16031 * So write it again.
16032 */
16033 I915_WRITE(DPLL(pipe), dpll);
16034
16035 /* We do this three times for luck */
16036 for (i = 0; i < 3; i++) {
16037 I915_WRITE(DPLL(pipe), dpll);
16038 POSTING_READ(DPLL(pipe));
16039 udelay(150); /* wait for warmup */
16040 }
16041
16042 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16043 POSTING_READ(PIPECONF(pipe));
16044
16045 intel_wait_for_pipe_scanline_moving(crtc);
16046 }
16047
16048 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16049 {
16050 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16051
16052 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16053 pipe_name(pipe));
16054
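/* All planes and cursors must already be off before the pipe goes down. */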
16055 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
16056 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
16057 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
16058 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
16059 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
16060
16061 I915_WRITE(PIPECONF(pipe), 0);
16062 POSTING_READ(PIPECONF(pipe));
16063
16064 intel_wait_for_pipe_scanline_stopped(crtc);
16065
16066 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
16067 POSTING_READ(DPLL(pipe));
16068 }
16069
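/*
 * On gen2/3 the primary planes can be assigned to either pipe, so the
 * BIOS may leave a plane attached to a pipe other than the one its crtc
 * drives. Detect that and turn the misrouted plane off.
 */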
16070 static void
16071 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16072 {
16073 struct intel_crtc *crtc;
16074
16075 if (INTEL_GEN(dev_priv) >= 4)
16076 return;
16077
16078 for_each_intel_crtc(&dev_priv->drm, crtc) {
16079 struct intel_plane *plane =
16080 to_intel_plane(crtc->base.primary);
16081 struct intel_crtc *plane_crtc;
16082 enum pipe pipe;
16083
16084 if (!plane->get_hw_state(plane, &pipe))
16085 continue;
16086
16087 if (pipe == crtc->pipe)
16088 continue;
16089
16090 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16091 plane->base.base.id, plane->base.name);
16092
16093 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16094 intel_plane_disable_noatomic(plane_crtc, plane);
16095 }
16096 }
16097
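/* Return true if at least one encoder is currently attached to the crtc. */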
16098 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16099 {
16100 struct drm_device *dev = crtc->base.dev;
16101 struct intel_encoder *encoder;
16102
16103 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16104 return true;
16105
16106 return false;
16107 }
16108
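/* Return the first connector currently attached to the encoder, if any. */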
16109 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16110 {
16111 struct drm_device *dev = encoder->base.dev;
16112 struct intel_connector *connector;
16113
16114 for_each_connector_on_encoder(dev, &encoder->base, connector)
16115 return connector;
16116
16117 return NULL;
16118 }
16119
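/*
 * IBX/CPT have one PCH transcoder per pipe, whereas LPT-H has only
 * PCH transcoder A.
 */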
16120 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
16121 enum pipe pch_transcoder)
16122 {
16123 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16124 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16125 }
16126
16127 static void intel_sanitize_crtc(struct intel_crtc *crtc,
16128 struct drm_modeset_acquire_ctx *ctx)
16129 {
16130 struct drm_device *dev = crtc->base.dev;
16131 struct drm_i915_private *dev_priv = to_i915(dev);
16132 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
16133 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
16134
16135 /* Clear any frame start delays used for debugging left by the BIOS */
16136 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
16137 i915_reg_t reg = PIPECONF(cpu_transcoder);
16138
16139 I915_WRITE(reg,
16140 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16141 }
16142
16143 if (crtc_state->base.active) {
16144 struct intel_plane *plane;
16145
16146 /* Disable everything but the primary plane */
16147 for_each_intel_plane_on_crtc(dev, crtc, plane) {
16148 const struct intel_plane_state *plane_state =
16149 to_intel_plane_state(plane->base.state);
16150
16151 if (plane_state->base.visible &&
16152 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16153 intel_plane_disable_noatomic(crtc, plane);
16154 }
16155
16156 /*
16157 * Disable any background color set by the BIOS, but enable the
16158 * gamma and CSC to match how we program our planes.
16159 */
16160 if (INTEL_GEN(dev_priv) >= 9)
16161 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16162 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16163 SKL_BOTTOM_COLOR_CSC_ENABLE);
16164 }
16165
16166 /* Adjust the state of the output pipe according to whether we
16167 * have active connectors/encoders. */
16168 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
16169 intel_crtc_disable_noatomic(&crtc->base, ctx);
16170
16171 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
16172 /*
16173 * We start out with underrun reporting disabled to avoid races.
16174 * For correct bookkeeping mark this on active crtcs.
16175 *
16176 * Also on gmch platforms we don't have any hardware bits to
16177 * disable the underrun reporting. Which means we need to start
16178 * out with underrun reporting disabled also on inactive pipes,
16179 * since otherwise we'll complain about the garbage we read when
16180 * e.g. coming up after runtime pm.
16181 *
16182 * No protection against concurrent access is required - at
16183 * worst a fifo underrun happens which also sets this to false.
16184 */
16185 crtc->cpu_fifo_underrun_disabled = true;
16186 /*
16187 * We track the PCH transcoder underrun reporting state
16188 * within the crtc. With crtc for pipe A housing the underrun
16189 * reporting state for PCH transcoder A, crtc for pipe B housing
16190 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16191 * and marking underrun reporting as disabled for the non-existing
16192 * PCH transcoders B and C would prevent enabling the south
16193 * error interrupt (see cpt_can_enable_serr_int()).
16194 */
16195 if (has_pch_transcoder(dev_priv, crtc->pipe))
16196 crtc->pch_fifo_underrun_disabled = true;
16197 }
16198 }
16199
16200 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16201 {
16202 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16203
16204 /*
16205 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
16206 * the hardware when a high res display is plugged in. The DPLL P
16207 * divider is zero, and the pipe timings are bonkers. We'll
16208 * try to disable everything in that case.
16209 *
16210 * FIXME would be nice to be able to sanitize this state
16211 * without several WARNs, but for now let's take the easy
16212 * road.
16213 */
16214 return IS_GEN(dev_priv, 6) &&
16215 crtc_state->base.active &&
16216 crtc_state->shared_dpll &&
16217 crtc_state->port_clock == 0;
16218 }
16219
16220 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16221 {
16222 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
16223 struct intel_connector *connector;
16224 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16225 struct intel_crtc_state *crtc_state = crtc ?
16226 to_intel_crtc_state(crtc->base.state) : NULL;
16227
16228 /* We need to check both for a crtc link (meaning that the
16229 * encoder is active and trying to read from a pipe) and the
16230 * pipe itself being active. */
16231 bool has_active_crtc = crtc_state &&
16232 crtc_state->base.active;
16233
16234 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16235 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16236 pipe_name(crtc->pipe));
16237 has_active_crtc = false;
16238 }
16239
16240 connector = intel_encoder_find_connector(encoder);
16241 if (connector && !has_active_crtc) {
16242 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16243 encoder->base.base.id,
16244 encoder->base.name);
16245
16246 /* Connector is active, but has no active pipe. This is
16247 * fallout from our resume register restoring. Disable
16248 * the encoder manually again. */
16249 if (crtc_state) {
16250 struct drm_encoder *best_encoder;
16251
16252 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16253 encoder->base.base.id,
16254 encoder->base.name);
16255
16256 /* avoid oopsing in case the hooks consult best_encoder */
16257 best_encoder = connector->base.state->best_encoder;
16258 connector->base.state->best_encoder = &encoder->base;
16259
16260 if (encoder->disable)
16261 encoder->disable(encoder, crtc_state,
16262 connector->base.state);
16263 if (encoder->post_disable)
16264 encoder->post_disable(encoder, crtc_state,
16265 connector->base.state);
16266
16267 connector->base.state->best_encoder = best_encoder;
16268 }
16269 encoder->base.crtc = NULL;
16270
16271 /* Inconsistent output/port/pipe state happens presumably due to
16272 * a bug in one of the get_hw_state functions. Or someplace else
16273 * in our code, like the register restore mess on resume. Clamp
16274 * things to off as a safer default. */
16275
16276 connector->base.dpms = DRM_MODE_DPMS_OFF;
16277 connector->base.encoder = NULL;
16278 }
16279
16280 /* notify opregion of the sanitized encoder state */
16281 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
16282
16283 if (INTEL_GEN(dev_priv) >= 11)
16284 icl_sanitize_encoder_pll_mapping(encoder);
16285 }
16286
16287 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16288 {
16289 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16290
16291 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16292 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16293 i915_disable_vga(dev_priv);
16294 }
16295 }
16296
16297 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16298 {
16299 intel_wakeref_t wakeref;
16300
16301 /*
16302 * This function can be called either from intel_modeset_setup_hw_state or
16303 * at a very early point in our resume sequence, where the power well
16304 * structures are not yet restored. Since this function is at a very
16305 * paranoid "someone might have enabled VGA while we were not looking"
16306 * level, just check if the power well is enabled instead of trying to
16307 * follow the "don't touch the power well if we don't need it" policy
16308 * the rest of the driver uses.
16309 */
16310 wakeref = intel_display_power_get_if_enabled(dev_priv,
16311 POWER_DOMAIN_VGA);
16312 if (!wakeref)
16313 return;
16314
16315 i915_redisable_vga_power_on(dev_priv);
16316
16317 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16318 }
16319
16320 /* FIXME read out full plane state for all planes */
16321 static void readout_plane_state(struct drm_i915_private *dev_priv)
16322 {
16323 struct intel_plane *plane;
16324 struct intel_crtc *crtc;
16325
16326 for_each_intel_plane(&dev_priv->drm, plane) {
16327 struct intel_plane_state *plane_state =
16328 to_intel_plane_state(plane->base.state);
16329 struct intel_crtc_state *crtc_state;
16330 enum pipe pipe = PIPE_A;
16331 bool visible;
16332
16333 visible = plane->get_hw_state(plane, &pipe);
16334
16335 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16336 crtc_state = to_intel_crtc_state(crtc->base.state);
16337
16338 intel_set_plane_visible(crtc_state, plane_state, visible);
16339
16340 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16341 plane->base.base.id, plane->base.name,
16342 enableddisabled(visible), pipe_name(pipe));
16343 }
16344
16345 for_each_intel_crtc(&dev_priv->drm, crtc) {
16346 struct intel_crtc_state *crtc_state =
16347 to_intel_crtc_state(crtc->base.state);
16348
16349 fixup_active_planes(crtc_state);
16350 }
16351 }
16352
16353 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16354 {
16355 struct drm_i915_private *dev_priv = to_i915(dev);
16356 enum pipe pipe;
16357 struct intel_crtc *crtc;
16358 struct intel_encoder *encoder;
16359 struct intel_connector *connector;
16360 struct drm_connector_list_iter conn_iter;
16361 int i;
16362
16363 dev_priv->active_crtcs = 0;
16364
16365 for_each_intel_crtc(dev, crtc) {
16366 struct intel_crtc_state *crtc_state =
16367 to_intel_crtc_state(crtc->base.state);
16368
16369 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16370 memset(crtc_state, 0, sizeof(*crtc_state));
16371 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
16372
16373 crtc_state->base.active = crtc_state->base.enable =
16374 dev_priv->display.get_pipe_config(crtc, crtc_state);
16375
16376 crtc->base.enabled = crtc_state->base.enable;
16377 crtc->active = crtc_state->base.active;
16378
16379 if (crtc_state->base.active)
16380 dev_priv->active_crtcs |= 1 << crtc->pipe;
16381
16382 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16383 crtc->base.base.id, crtc->base.name,
16384 enableddisabled(crtc_state->base.active));
16385 }
16386
16387 readout_plane_state(dev_priv);
16388
16389 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16390 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16391
16392 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16393 &pll->state.hw_state);
16394 pll->state.crtc_mask = 0;
16395 for_each_intel_crtc(dev, crtc) {
16396 struct intel_crtc_state *crtc_state =
16397 to_intel_crtc_state(crtc->base.state);
16398
16399 if (crtc_state->base.active &&
16400 crtc_state->shared_dpll == pll)
16401 pll->state.crtc_mask |= 1 << crtc->pipe;
16402 }
16403 pll->active_mask = pll->state.crtc_mask;
16404
16405 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16406 pll->info->name, pll->state.crtc_mask, pll->on);
16407 }
16408
16409 for_each_intel_encoder(dev, encoder) {
16410 pipe = PIPE_A;
16411
16412 if (encoder->get_hw_state(encoder, &pipe)) {
16413 struct intel_crtc_state *crtc_state;
16414
16415 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16416 crtc_state = to_intel_crtc_state(crtc->base.state);
16417
16418 encoder->base.crtc = &crtc->base;
16419 encoder->get_config(encoder, crtc_state);
16420 } else {
16421 encoder->base.crtc = NULL;
16422 }
16423
16424 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16425 encoder->base.base.id, encoder->base.name,
16426 enableddisabled(encoder->base.crtc),
16427 pipe_name(pipe));
16428 }
16429
16430 drm_connector_list_iter_begin(dev, &conn_iter);
16431 for_each_intel_connector_iter(connector, &conn_iter) {
16432 if (connector->get_hw_state(connector)) {
16433 connector->base.dpms = DRM_MODE_DPMS_ON;
16434
16435 encoder = connector->encoder;
16436 connector->base.encoder = &encoder->base;
16437
16438 if (encoder->base.crtc &&
16439 encoder->base.crtc->state->active) {
16440 /*
16441 * This has to be done during hardware readout
16442 * because anything calling .crtc_disable may
16443 * rely on the connector_mask being accurate.
16444 */
16445 encoder->base.crtc->state->connector_mask |=
16446 drm_connector_mask(&connector->base);
16447 encoder->base.crtc->state->encoder_mask |=
16448 drm_encoder_mask(&encoder->base);
16449 }
16450
16451 } else {
16452 connector->base.dpms = DRM_MODE_DPMS_OFF;
16453 connector->base.encoder = NULL;
16454 }
16455 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16456 connector->base.base.id, connector->base.name,
16457 enableddisabled(connector->base.encoder));
16458 }
16459 drm_connector_list_iter_end(&conn_iter);
16460
16461 for_each_intel_crtc(dev, crtc) {
16462 struct intel_bw_state *bw_state =
16463 to_intel_bw_state(dev_priv->bw_obj.state);
16464 struct intel_crtc_state *crtc_state =
16465 to_intel_crtc_state(crtc->base.state);
16466 struct intel_plane *plane;
16467 int min_cdclk = 0;
16468
16469 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16470 if (crtc_state->base.active) {
16471 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16472 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16473 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16474 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16475 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16476
16477 /*
16478 * The initial mode needs to be set in order to keep
16479 * the atomic core happy. It wants a valid mode if the
16480 * crtc's enabled, so we do the above call.
16481 *
16482 * But we don't set all the derived state fully, hence
16483 * set a flag to indicate that a full recalculation is
16484 * needed on the next commit.
16485 */
16486 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16487
16488 intel_crtc_compute_pixel_rate(crtc_state);
16489
16490 if (dev_priv->display.modeset_calc_cdclk) {
16491 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16492 if (WARN_ON(min_cdclk < 0))
16493 min_cdclk = 0;
16494 }
16495
16496 drm_calc_timestamping_constants(&crtc->base,
16497 &crtc_state->base.adjusted_mode);
16498 update_scanline_offset(crtc_state);
16499 }
16500
16501 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16502 dev_priv->min_voltage_level[crtc->pipe] =
16503 crtc_state->min_voltage_level;
16504
16505 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
16506 const struct intel_plane_state *plane_state =
16507 to_intel_plane_state(plane->base.state);
16508
16509 /*
16510 * FIXME don't have the fb yet, so can't
16511 * use intel_plane_data_rate() :(
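* Assume a worst-case 4 bytes per pixel in the meantime.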
16512 */
16513 if (plane_state->base.visible)
16514 crtc_state->data_rate[plane->id] =
16515 4 * crtc_state->pixel_rate;
16516 }
16517
16518 intel_bw_crtc_update(bw_state, crtc_state);
16519
16520 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16521 }
16522 }
16523
16524 static void
16525 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16526 {
16527 struct intel_encoder *encoder;
16528
16529 for_each_intel_encoder(&dev_priv->drm, encoder) {
16530 struct intel_crtc_state *crtc_state;
16531
16532 if (!encoder->get_power_domains)
16533 continue;
16534
16535 /*
16536 * MST-primary and inactive encoders don't have a crtc state
16537 * and neither of these requires any power domain references.
16538 */
16539 if (!encoder->base.crtc)
16540 continue;
16541
16542 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16543 encoder->get_power_domains(encoder, crtc_state);
16544 }
16545 }
16546
16547 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16548 {
16549 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16550 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16551 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16552 DARBF_GATING_DIS);
16553
16554 if (IS_HASWELL(dev_priv)) {
16555 /*
16556 * WaRsPkgCStateDisplayPMReq:hsw
16557 * System hang if this isn't done before disabling all planes!
16558 */
16559 I915_WRITE(CHICKEN_PAR1_1,
16560 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16561 }
16562 }
16563
16564 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16565 enum port port, i915_reg_t hdmi_reg)
16566 {
16567 u32 val = I915_READ(hdmi_reg);
16568
16569 if (val & SDVO_ENABLE ||
16570 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16571 return;
16572
16573 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16574 port_name(port));
16575
16576 val &= ~SDVO_PIPE_SEL_MASK;
16577 val |= SDVO_PIPE_SEL(PIPE_A);
16578
16579 I915_WRITE(hdmi_reg, val);
16580 }
16581
16582 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16583 enum port port, i915_reg_t dp_reg)
16584 {
16585 u32 val = I915_READ(dp_reg);
16586
16587 if (val & DP_PORT_EN ||
16588 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16589 return;
16590
16591 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16592 port_name(port));
16593
16594 val &= ~DP_PIPE_SEL_MASK;
16595 val |= DP_PIPE_SEL(PIPE_A);
16596
16597 I915_WRITE(dp_reg, val);
16598 }
16599
16600 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16601 {
16602 /*
16603 * The BIOS may select transcoder B on some of the PCH
16604 * ports even if it doesn't enable the port. This would trip
16605 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16606 * Sanitize the transcoder select bits to prevent that. We
16607 * assume that the BIOS never actually enabled the port,
16608 * because if it did we'd actually have to toggle the port
16609 * on and back off to make the transcoder A select stick
16610 * (see. intel_dp_link_down(), intel_disable_hdmi(),
16611 * intel_disable_sdvo()).
16612 */
16613 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16614 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16615 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16616
16617 /* PCH SDVOB multiplex with HDMIB */
16618 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16619 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16620 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16621 }
16622
16623 /* Read out the current hw modeset state and sanitize it so that
16624 * the software state matches the hardware state.
16625 */
16626 static void
16627 intel_modeset_setup_hw_state(struct drm_device *dev,
16628 struct drm_modeset_acquire_ctx *ctx)
16629 {
16630 struct drm_i915_private *dev_priv = to_i915(dev);
16631 struct intel_crtc_state *crtc_state;
16632 struct intel_encoder *encoder;
16633 struct intel_crtc *crtc;
16634 intel_wakeref_t wakeref;
16635 int i;
16636
16637 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16638
16639 intel_early_display_was(dev_priv);
16640 intel_modeset_readout_hw_state(dev);
16641
16642 /* HW state is read out, now we need to sanitize this mess. */
16643 get_encoder_power_domains(dev_priv);
16644
16645 if (HAS_PCH_IBX(dev_priv))
16646 ibx_sanitize_pch_ports(dev_priv);
16647
16648 /*
16649 * intel_sanitize_plane_mapping() may need to do vblank
16650 * waits, so we need vblank interrupts restored beforehand.
16651 */
16652 for_each_intel_crtc(&dev_priv->drm, crtc) {
16653 crtc_state = to_intel_crtc_state(crtc->base.state);
16654
16655 drm_crtc_vblank_reset(&crtc->base);
16656
16657 if (crtc_state->base.active)
16658 intel_crtc_vblank_on(crtc_state);
16659 }
16660
16661 intel_sanitize_plane_mapping(dev_priv);
16662
16663 for_each_intel_encoder(dev, encoder)
16664 intel_sanitize_encoder(encoder);
16665
16666 for_each_intel_crtc(&dev_priv->drm, crtc) {
16667 crtc_state = to_intel_crtc_state(crtc->base.state);
16668 intel_sanitize_crtc(crtc, ctx);
16669 intel_dump_pipe_config(crtc, crtc_state,
16670 "[setup_hw_state]");
16671 }
16672
16673 intel_modeset_update_connector_atomic_state(dev);
16674
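/* Disable any shared DPLL that is enabled but has no active crtc using it. */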
16675 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16676 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16677
16678 if (!pll->on || pll->active_mask)
16679 continue;
16680
16681 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16682 pll->info->name);
16683
16684 pll->info->funcs->disable(dev_priv, pll);
16685 pll->on = false;
16686 }
16687
16688 if (IS_G4X(dev_priv)) {
16689 g4x_wm_get_hw_state(dev_priv);
16690 g4x_wm_sanitize(dev_priv);
16691 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16692 vlv_wm_get_hw_state(dev_priv);
16693 vlv_wm_sanitize(dev_priv);
16694 } else if (INTEL_GEN(dev_priv) >= 9) {
16695 skl_wm_get_hw_state(dev_priv);
16696 } else if (HAS_PCH_SPLIT(dev_priv)) {
16697 ilk_wm_get_hw_state(dev_priv);
16698 }
16699
16700 for_each_intel_crtc(dev, crtc) {
16701 u64 put_domains;
16702
16703 crtc_state = to_intel_crtc_state(crtc->base.state);
16704 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16705 if (WARN_ON(put_domains))
16706 modeset_put_power_domains(dev_priv, put_domains);
16707 }
16708
16709 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
16710
16711 intel_fbc_init_pipe_state(dev_priv);
16712 }
16713
16714 void intel_display_resume(struct drm_device *dev)
16715 {
16716 struct drm_i915_private *dev_priv = to_i915(dev);
16717 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16718 struct drm_modeset_acquire_ctx ctx;
16719 int ret;
16720
16721 dev_priv->modeset_restore_state = NULL;
16722 if (state)
16723 state->acquire_ctx = &ctx;
16724
16725 drm_modeset_acquire_init(&ctx, 0);
16726
16727 while (1) {
16728 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16729 if (ret != -EDEADLK)
16730 break;
16731
16732 drm_modeset_backoff(&ctx);
16733 }
16734
16735 if (!ret)
16736 ret = __intel_display_resume(dev, state, &ctx);
16737
16738 intel_enable_ipc(dev_priv);
16739 drm_modeset_drop_locks(&ctx);
16740 drm_modeset_acquire_fini(&ctx);
16741
16742 if (ret)
16743 DRM_ERROR("Restoring old state failed with %i\n", ret);
16744 if (state)
16745 drm_atomic_state_put(state);
16746 }
16747
16748 static void intel_hpd_poll_fini(struct drm_device *dev)
16749 {
16750 struct intel_connector *connector;
16751 struct drm_connector_list_iter conn_iter;
16752
16753 /* Kill all the work that may have been queued by hpd. */
16754 drm_connector_list_iter_begin(dev, &conn_iter);
16755 for_each_intel_connector_iter(connector, &conn_iter) {
16756 if (connector->modeset_retry_work.func)
16757 cancel_work_sync(&connector->modeset_retry_work);
16758 if (connector->hdcp.shim) {
16759 cancel_delayed_work_sync(&connector->hdcp.check_work);
16760 cancel_work_sync(&connector->hdcp.prop_work);
16761 }
16762 }
16763 drm_connector_list_iter_end(&conn_iter);
16764 }
16765
16766 void intel_modeset_cleanup(struct drm_device *dev)
16767 {
16768 struct drm_i915_private *dev_priv = to_i915(dev);
16769
16770 flush_workqueue(dev_priv->modeset_wq);
16771
16772 flush_work(&dev_priv->atomic_helper.free_work);
16773 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16774
16775 /*
16776 * Disable interrupts and polling as the first thing, to avoid creating
16777 * havoc. Too much of what follows (turning off connectors, ...) would
16778 * otherwise be exposed to fancy races.
16779 */
16780 intel_irq_uninstall(dev_priv);
16781
16782 /*
16783 * Due to the hpd irq storm handling the hotplug work can re-arm the
16784 * poll handlers. Hence disable polling after hpd handling is shut down.
16785 */
16786 intel_hpd_poll_fini(dev);
16787
16788 /* poll work can call into fbdev, hence clean that up afterwards */
16789 intel_fbdev_fini(dev_priv);
16790
16791 intel_unregister_dsm_handler();
16792
16793 intel_fbc_global_disable(dev_priv);
16794
16795 /* flush any delayed tasks or pending work */
16796 flush_scheduled_work();
16797
16798 intel_hdcp_component_fini(dev_priv);
16799
16800 drm_mode_config_cleanup(dev);
16801
16802 intel_overlay_cleanup(dev_priv);
16803
16804 intel_gmbus_teardown(dev_priv);
16805
16806 destroy_workqueue(dev_priv->modeset_wq);
16807
16808 intel_fbc_cleanup_cfb(dev_priv);
16809 }
16810
16811 /*
16812 * set vga decode state - true == enable VGA decode
16813 */
16814 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16815 {
16816 unsigned int reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16817 u16 gmch_ctrl;
16818
16819 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16820 DRM_ERROR("failed to read control word\n");
16821 return -EIO;
16822 }
16823
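/*
 * Nothing to do if decode already matches the requested state; note
 * that INTEL_GMCH_VGA_DISABLE is a disable bit, hence the inversion.
 */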
16824 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16825 return 0;
16826
16827 if (state)
16828 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16829 else
16830 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16831
16832 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16833 DRM_ERROR("failed to write control word\n");
16834 return -EIO;
16835 }
16836
16837 return 0;
16838 }
16839
16840 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16841
16842 struct intel_display_error_state {
16843
16844 u32 power_well_driver;
16845
16846 struct intel_cursor_error_state {
16847 u32 control;
16848 u32 position;
16849 u32 base;
16850 u32 size;
16851 } cursor[I915_MAX_PIPES];
16852
16853 struct intel_pipe_error_state {
16854 bool power_domain_on;
16855 u32 source;
16856 u32 stat;
16857 } pipe[I915_MAX_PIPES];
16858
16859 struct intel_plane_error_state {
16860 u32 control;
16861 u32 stride;
16862 u32 size;
16863 u32 pos;
16864 u32 addr;
16865 u32 surface;
16866 u32 tile_offset;
16867 } plane[I915_MAX_PIPES];
16868
16869 struct intel_transcoder_error_state {
16870 bool available;
16871 bool power_domain_on;
16872 enum transcoder cpu_transcoder;
16873
16874 u32 conf;
16875
16876 u32 htotal;
16877 u32 hblank;
16878 u32 hsync;
16879 u32 vtotal;
16880 u32 vblank;
16881 u32 vsync;
16882 } transcoder[4];
16883 };
16884
16885 struct intel_display_error_state *
16886 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16887 {
16888 struct intel_display_error_state *error;
16889 static const int transcoders[] = {
16890 TRANSCODER_A,
16891 TRANSCODER_B,
16892 TRANSCODER_C,
16893 TRANSCODER_EDP,
16894 };
16895 int i;
16896
16897 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16898
16899 if (!HAS_DISPLAY(dev_priv))
16900 return NULL;
16901
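/*
 * This can be called from atomic context (e.g. during error capture),
 * hence the GFP_ATOMIC allocation.
 */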
16902 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16903 if (error == NULL)
16904 return NULL;
16905
16906 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16907 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16908
16909 for_each_pipe(dev_priv, i) {
16910 error->pipe[i].power_domain_on =
16911 __intel_display_power_is_enabled(dev_priv,
16912 POWER_DOMAIN_PIPE(i));
16913 if (!error->pipe[i].power_domain_on)
16914 continue;
16915
16916 error->cursor[i].control = I915_READ(CURCNTR(i));
16917 error->cursor[i].position = I915_READ(CURPOS(i));
16918 error->cursor[i].base = I915_READ(CURBASE(i));
16919
16920 error->plane[i].control = I915_READ(DSPCNTR(i));
16921 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16922 if (INTEL_GEN(dev_priv) <= 3) {
16923 error->plane[i].size = I915_READ(DSPSIZE(i));
16924 error->plane[i].pos = I915_READ(DSPPOS(i));
16925 }
16926 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16927 error->plane[i].addr = I915_READ(DSPADDR(i));
16928 if (INTEL_GEN(dev_priv) >= 4) {
16929 error->plane[i].surface = I915_READ(DSPSURF(i));
16930 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16931 }
16932
16933 error->pipe[i].source = I915_READ(PIPESRC(i));
16934
16935 if (HAS_GMCH(dev_priv))
16936 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16937 }
16938
16939 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16940 enum transcoder cpu_transcoder = transcoders[i];
16941
16942 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16943 continue;
16944
16945 error->transcoder[i].available = true;
16946 error->transcoder[i].power_domain_on =
16947 __intel_display_power_is_enabled(dev_priv,
16948 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16949 if (!error->transcoder[i].power_domain_on)
16950 continue;
16951
16952 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16953
16954 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16955 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16956 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16957 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16958 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16959 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16960 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16961 }
16962
16963 return error;
16964 }
16965
16966 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16967
16968 void
16969 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16970 struct intel_display_error_state *error)
16971 {
16972 struct drm_i915_private *dev_priv = m->i915;
16973 int i;
16974
16975 if (!error)
16976 return;
16977
16978 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16979 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16980 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16981 error->power_well_driver);
16982 for_each_pipe(dev_priv, i) {
16983 err_printf(m, "Pipe [%d]:\n", i);
16984 err_printf(m, " Power: %s\n",
16985 onoff(error->pipe[i].power_domain_on));
16986 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
16987 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
16988
16989 err_printf(m, "Plane [%d]:\n", i);
16990 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
16991 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
16992 if (INTEL_GEN(dev_priv) <= 3) {
16993 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
16994 err_printf(m, " POS: %08x\n", error->plane[i].pos);
16995 }
16996 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16997 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
16998 if (INTEL_GEN(dev_priv) >= 4) {
16999 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
17000 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
17001 }
17002
17003 err_printf(m, "Cursor [%d]:\n", i);
17004 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
17005 err_printf(m, " POS: %08x\n", error->cursor[i].position);
17006 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
17007 }
17008
17009 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17010 if (!error->transcoder[i].available)
17011 continue;
17012
17013 err_printf(m, "CPU transcoder: %s\n",
17014 transcoder_name(error->transcoder[i].cpu_transcoder));
17015 err_printf(m, " Power: %s\n",
17016 onoff(error->transcoder[i].power_domain_on));
17017 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
17018 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
17019 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
17020 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
17021 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
17022 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
17023 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
17024 }
17025 }
17026
17027 #endif