/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_color.h"
#include "intel_cdclk.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lvds.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tv.h"
#include "intel_vdsc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

        /* Obtain SKU information */
        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
                CCK_FUSE_HPLL_FREQ_MASK;

        return vco_freq[hpll_freq] * 1000;
}
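
/*
 * Illustrative example (hypothetical fuse value, not real SKU data): a
 * CCK_FUSE_HPLL_FREQ_MASK field of 2 selects vco_freq[2] = 2000, so the
 * function reports the 2000 MHz HPLL VCO as 2000000 kHz.
 */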

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq)
{
        u32 val;
        int divider;

        val = vlv_cck_read(dev_priv, reg);
        divider = val & CCK_FREQUENCY_VALUES;

        WARN((val & CCK_FREQUENCY_STATUS) !=
             (divider << CCK_FREQUENCY_STATUS_SHIFT),
             "%s change in progress\n", name);

        return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
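
/*
 * Worked example (illustrative values): the register encodes the divider in
 * half steps, so with ref_freq = 1600000 kHz and a divider field of 7 the
 * result is DIV_ROUND_CLOSEST(1600000 << 1, 7 + 1) = 3200000 / 8 =
 * 400000 kHz.
 */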

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
                           const char *name, u32 reg)
{
        int hpll;

        vlv_cck_get(dev_priv);

        if (dev_priv->hpll_freq == 0)
                dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

        hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

        vlv_cck_put(dev_priv);

        return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
        if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
                return;

        dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
                                                      CCK_CZ_CLOCK_CONTROL);

        DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
                    const struct intel_crtc_state *pipe_config)
{
        if (HAS_DDI(dev_priv))
                return pipe_config->port_clock; /* SPLL */
        else
                return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have. The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have. The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) |
                           DUPS1_GATING_DIS | DUPS2_GATING_DIS);
        else
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) &
                           ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
                       bool enable)
{
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
        else
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
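
/*
 * Worked example for the i9xx equation above (illustrative divisors, not
 * taken from any platform table): refclk = 96000 kHz, m1 = 12, m2 = 6,
 * n = 3, p1 = 2, p2 = 10 gives m = 5 * (12 + 2) + (6 + 2) = 78,
 * vco = 96000 * 78 / (3 + 2) = 1497600 kHz, p = 2 * 10 = 20 and
 * dot = 1497600 / 20 = 74880 kHz.
 */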

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
                                           clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}
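
/*
 * Worked example for the CHV/BXT variant (illustrative divisors): m2 carries
 * 22 fractional bits, so with refclk = 100000 kHz (the reference also used
 * for BXT below), n = 1, m1 = 2 and m2 = 28 << 22 the VCO is
 * 100000 * 2 * (28 << 22) / (1 << 22) = 5600000 kHz. With p1 = 2, p2 = 10
 * the fast dot clock is 5600000 / 20 = 280000 kHz, and the pipe clock is a
 * fifth of that, 56000 kHz.
 */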

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n < limit->n.min || limit->n.max < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
                   const struct intel_crtc_state *crtc_state,
                   int target)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
                 * single/dual channel state, if we even can.
                 */
                if (intel_is_dual_link_lvds(dev_priv))
                        return limit->p2.p2_fast;
                else
                        return limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        return limit->p2.p2_slow;
                else
                        return limit->p2.p2_fast;
        }
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                     clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                     clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 3/512, i.e. target * 0.00586 */
        int err_most = (target >> 8) + (target >> 9);

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1, m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}

/*
 * Check whether the calculated PLL configuration is more optimal compared to
 * the best configuration and error found so far. The calculated error is
 * returned in *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
                               const struct dpll *calculated_clock,
                               const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
{
        /*
         * For CHV ignore the error and consider only the P value.
         * Prefer a bigger P value based on HW requirements.
         */
        if (IS_CHERRYVIEW(to_i915(dev))) {
                *error_ppm = 0;

                return calculated_clock->p > best_clock->p;
        }

        if (WARN_ON_ONCE(!target_freq))
                return false;

        *error_ppm = div_u64(1000000ULL *
                             abs(target_freq - calculated_clock->dot),
                             target_freq);
        /*
         * Prefer a better P value over a better (smaller) error if the error
         * is small. Ensure this preference for future configurations too by
         * setting the error to 0.
         */
        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
                *error_ppm = 0;

                return true;
        }

        return *error_ppm + 10 < best_error_ppm;
}
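
/*
 * Worked example for the ppm computation above (illustrative numbers): a
 * target fast clock of 742500 kHz and a calculated dot clock of 742575 kHz
 * give an error of 1000000 * 75 / 742500 ~= 101 ppm, just above the 100 ppm
 * threshold, so the "prefer a bigger P" shortcut would not kick in.
 */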

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
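
/*
 * The m2 computation in the loop above is just the dot clock equation solved
 * for m2 (a sketch of the algebra, ignoring rounding): from
 * dot = refclk * m1 * m2 / (n * p) it follows that
 * m2 = dot * n * p / (refclk * m1), where "dot" is the target fast clock
 * (5x the pipe pixel clock).
 */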

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        unsigned int best_error_ppm;
        struct dpll clock;
        u64 m2;
        bool found = false;

        memset(best_clock, 0, sizeof(*best_clock));
        best_error_ppm = 1000000;

        /*
         * Based on the hardware doc, n is always set to 1, and m1 is always
         * set to 2. If we are required to support a 200 MHz refclk, we need
         * to revisit this because n may no longer be 1.
         */
        clock.n = 1;
        clock.m1 = 2;
        target *= 5; /* fast clock */

        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                for (clock.p2 = limit->p2.p2_fast;
                     clock.p2 >= limit->p2.p2_slow;
                     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                        unsigned int error_ppm;

                        clock.p = clock.p1 * clock.p2;

                        m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
                                                   refclk * clock.m1);

                        if (m2 > INT_MAX / clock.m1)
                                continue;

                        clock.m2 = m2;

                        chv_calc_dpll_params(refclk, &clock);

                        if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
                                continue;

                        if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
                                                best_error_ppm, &error_ppm))
                                continue;

                        *best_clock = clock;
                        best_error_ppm = error_ppm;
                        found = true;
                }
        }

        return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
                        struct dpll *best_clock)
{
        int refclk = 100000;
        const struct intel_limit *limit = &intel_limits_bxt;

        return chv_find_best_dpll(limit, crtc_state,
                                  crtc_state->port_clock, refclk,
                                  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         *
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
         * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
         * crtc->state->active once we have proper CRTC states wired up
         * for atomic.
         */
        return crtc->active && crtc->base.primary->state->fb &&
                crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;

        if (IS_GEN(dev_priv, 2))
                line_mask = DSL_LINEMASK_GEN2;
        else
                line_mask = DSL_LINEMASK_GEN3;

        line1 = I915_READ(reg) & line_mask;
        msleep(5);
        line2 = I915_READ(reg) & line_mask;

        return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(&dev_priv->uncore,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(DPLL(pipe));
        cur_state = !!(val & DPLL_VCO_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        vlv_cck_get(dev_priv);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        vlv_cck_put(dev_priv);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
                        "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
                        "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "FDI RX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "FDI RX PLL assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
                        "panel assertion failure, pipe %c regs locked\n",
                        pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
                        "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        u32 val;
        bool enabled;

        val = I915_READ(PCH_TRANSCONF(pipe));
        enabled = !!(val & TRANS_ENABLE);
        I915_STATE_WARN(enabled,
                        "transcoder assertion failed, should be off on pipe %c but is still active\n",
                        pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (intel_wait_for_register(&dev_priv->uncore,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        vlv_dpio_get(dev_priv);

        /* Re-enable the 10bit clock to the display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        vlv_dpio_put(dev_priv);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
        if (IS_I830(dev_priv))
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (i9xx_has_pps(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Don't disable the pipe or pipe PLLs if they are still needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_INTEGRATED_REF_CLK_VLV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        vlv_dpio_get(dev_priv);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                BUG();
        }

        if (intel_wait_for_register(&dev_priv->uncore,
                                    dpll_reg, port_mask, expected_mask,
                                    1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->base.port),
                     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val, pipeconf_val;

        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
        assert_fdi_rx_enabled(dev_priv, pipe);

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Set the timing override bit before enabling the
                 * pch transcoder. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        pipeconf_val = I915_READ(PIPECONF(pipe));

        if (HAS_PCH_IBX(dev_priv)) {
                /*
                 * Make the BPC of the transcoder consistent with that of the
                 * pipeconf reg. For HDMI we must use 8bpc here for both
                 * 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
        }

        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
                if (HAS_PCH_IBX(dev_priv) &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
        } else {
                val |= TRANS_PROGRESSIVE;
        }

        I915_WRITE(reg, val | TRANS_ENABLE);
        if (intel_wait_for_register(&dev_priv->uncore,
                                    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
                                    100))
                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, PIPE_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_wait_for_register(&dev_priv->uncore,
                                    LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE,
                                    TRANS_STATE_ENABLE,
                                    100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        u32 val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    reg, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1767
1768 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1769 {
1770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1771
1772 if (HAS_PCH_LPT(dev_priv))
1773 return PIPE_A;
1774 else
1775 return crtc->pipe;
1776 }
1777
1778 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1779 {
1780 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1781
1782 /*
1783 * On i965gm the hardware frame counter reads
1784 * zero when the TV encoder is enabled :(
1785 */
1786 if (IS_I965GM(dev_priv) &&
1787 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1788 return 0;
1789
1790 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1791 return 0xffffffff; /* full 32 bit counter */
1792 else if (INTEL_GEN(dev_priv) >= 3)
1793 return 0xffffff; /* only 24 bits of frame count */
1794 else
1795 return 0; /* Gen2 doesn't have a hardware frame counter */
1796 }
1797
1798 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1799 {
1800 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1801
1802 drm_crtc_set_max_vblank_count(&crtc->base,
1803 intel_crtc_max_vblank_count(crtc_state));
1804 drm_crtc_vblank_on(&crtc->base);
1805 }
1806
1807 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1808 {
1809 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1811 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1812 enum pipe pipe = crtc->pipe;
1813 i915_reg_t reg;
1814 u32 val;
1815
1816 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1817
1818 assert_planes_disabled(crtc);
1819
1820 /*
1821 * A pipe without a PLL won't actually be able to drive bits from
1822 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1823 * need the check.
1824 */
1825 if (HAS_GMCH(dev_priv)) {
1826 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1827 assert_dsi_pll_enabled(dev_priv);
1828 else
1829 assert_pll_enabled(dev_priv, pipe);
1830 } else {
1831 if (new_crtc_state->has_pch_encoder) {
1832 /* if driving the PCH, we need FDI enabled */
1833 assert_fdi_rx_pll_enabled(dev_priv,
1834 intel_crtc_pch_transcoder(crtc));
1835 assert_fdi_tx_pll_enabled(dev_priv,
1836 (enum pipe) cpu_transcoder);
1837 }
1838 /* FIXME: assert CPU port conditions for SNB+ */
1839 }
1840
1841 trace_intel_pipe_enable(dev_priv, pipe);
1842
1843 reg = PIPECONF(cpu_transcoder);
1844 val = I915_READ(reg);
1845 if (val & PIPECONF_ENABLE) {
1846 /* we keep both pipes enabled on 830 */
1847 WARN_ON(!IS_I830(dev_priv));
1848 return;
1849 }
1850
1851 I915_WRITE(reg, val | PIPECONF_ENABLE);
1852 POSTING_READ(reg);
1853
1854 /*
1855 * Until the pipe starts, PIPEDSL reads will return a stale value,
1856 * which causes an apparent vblank timestamp jump when PIPEDSL
1857 * resets to its proper value. That also messes up the frame count
1858 * when it's derived from the timestamps. So let's wait for the
1859 * pipe to start properly before we call drm_crtc_vblank_on().
1860 */
1861 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1862 intel_wait_for_pipe_scanline_moving(crtc);
1863 }
1864
1865 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1866 {
1867 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1869 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1870 enum pipe pipe = crtc->pipe;
1871 i915_reg_t reg;
1872 u32 val;
1873
1874 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1875
1876 /*
1877 * Make sure planes won't keep trying to pump pixels to us,
1878 * or we might hang the display.
1879 */
1880 assert_planes_disabled(crtc);
1881
1882 trace_intel_pipe_disable(dev_priv, pipe);
1883
1884 reg = PIPECONF(cpu_transcoder);
1885 val = I915_READ(reg);
1886 if ((val & PIPECONF_ENABLE) == 0)
1887 return;
1888
1889 /*
1890 * Double wide has implications for planes
1891 * so best keep it disabled when not needed.
1892 */
1893 if (old_crtc_state->double_wide)
1894 val &= ~PIPECONF_DOUBLE_WIDE;
1895
1896 /* Don't disable the pipe or pipe PLLs when they must stay enabled (i830) */
1897 if (!IS_I830(dev_priv))
1898 val &= ~PIPECONF_ENABLE;
1899
1900 I915_WRITE(reg, val);
1901 if ((val & PIPECONF_ENABLE) == 0)
1902 intel_wait_for_pipe_off(old_crtc_state);
1903 }
1904
1905 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1906 {
1907 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1908 }
1909
1910 static unsigned int
1911 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1912 {
1913 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1914 unsigned int cpp = fb->format->cpp[color_plane];
1915
1916 switch (fb->modifier) {
1917 case DRM_FORMAT_MOD_LINEAR:
1918 return intel_tile_size(dev_priv);
1919 case I915_FORMAT_MOD_X_TILED:
1920 if (IS_GEN(dev_priv, 2))
1921 return 128;
1922 else
1923 return 512;
1924 case I915_FORMAT_MOD_Y_TILED_CCS:
1925 if (color_plane == 1)
1926 return 128;
1927 /* fall through */
1928 case I915_FORMAT_MOD_Y_TILED:
1929 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1930 return 128;
1931 else
1932 return 512;
1933 case I915_FORMAT_MOD_Yf_TILED_CCS:
1934 if (color_plane == 1)
1935 return 128;
1936 /* fall through */
1937 case I915_FORMAT_MOD_Yf_TILED:
1938 switch (cpp) {
1939 case 1:
1940 return 64;
1941 case 2:
1942 case 4:
1943 return 128;
1944 case 8:
1945 case 16:
1946 return 256;
1947 default:
1948 MISSING_CASE(cpp);
1949 return cpp;
1950 }
1951 break;
1952 default:
1953 MISSING_CASE(fb->modifier);
1954 return cpp;
1955 }
1956 }
1957
1958 static unsigned int
1959 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1960 {
1961 return intel_tile_size(to_i915(fb->dev)) /
1962 intel_tile_width_bytes(fb, color_plane);
1963 }
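/*
 * Illustrative tile geometries implied by the two helpers above
 * (assuming the common 4096-byte tile): a 512-byte-wide X-tile is
 * 4096 / 512 = 8 rows tall, a 128-byte-wide Y-tile is 4096 / 128 =
 * 32 rows tall, and the 2048-byte gen2 tile at 128 bytes wide is
 * 16 rows tall.
 */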
1964
1965 /* Return the tile dimensions in pixel units */
1966 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1967 unsigned int *tile_width,
1968 unsigned int *tile_height)
1969 {
1970 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1971 unsigned int cpp = fb->format->cpp[color_plane];
1972
1973 *tile_width = tile_width_bytes / cpp;
1974 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1975 }
1976
1977 unsigned int
1978 intel_fb_align_height(const struct drm_framebuffer *fb,
1979 int color_plane, unsigned int height)
1980 {
1981 unsigned int tile_height = intel_tile_height(fb, color_plane);
1982
1983 return ALIGN(height, tile_height);
1984 }
1985
1986 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1987 {
1988 unsigned int size = 0;
1989 int i;
1990
1991 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1992 size += rot_info->plane[i].width * rot_info->plane[i].height;
1993
1994 return size;
1995 }
1996
1997 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1998 {
1999 unsigned int size = 0;
2000 int i;
2001
2002 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2003 size += rem_info->plane[i].width * rem_info->plane[i].height;
2004
2005 return size;
2006 }
2007
2008 static void
2009 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2010 const struct drm_framebuffer *fb,
2011 unsigned int rotation)
2012 {
2013 view->type = I915_GGTT_VIEW_NORMAL;
2014 if (drm_rotation_90_or_270(rotation)) {
2015 view->type = I915_GGTT_VIEW_ROTATED;
2016 view->rotated = to_intel_framebuffer(fb)->rot_info;
2017 }
2018 }
2019
2020 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2021 {
2022 if (IS_I830(dev_priv))
2023 return 16 * 1024;
2024 else if (IS_I85X(dev_priv))
2025 return 256;
2026 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2027 return 32;
2028 else
2029 return 4 * 1024;
2030 }
2031
2032 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2033 {
2034 if (INTEL_GEN(dev_priv) >= 9)
2035 return 256 * 1024;
2036 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2037 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2038 return 128 * 1024;
2039 else if (INTEL_GEN(dev_priv) >= 4)
2040 return 4 * 1024;
2041 else
2042 return 0;
2043 }
2044
2045 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2046 int color_plane)
2047 {
2048 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2049
2050 /* AUX_DIST needs only 4K alignment */
2051 if (color_plane == 1)
2052 return 4096;
2053
2054 switch (fb->modifier) {
2055 case DRM_FORMAT_MOD_LINEAR:
2056 return intel_linear_alignment(dev_priv);
2057 case I915_FORMAT_MOD_X_TILED:
2058 if (INTEL_GEN(dev_priv) >= 9)
2059 return 256 * 1024;
2060 return 0;
2061 case I915_FORMAT_MOD_Y_TILED_CCS:
2062 case I915_FORMAT_MOD_Yf_TILED_CCS:
2063 case I915_FORMAT_MOD_Y_TILED:
2064 case I915_FORMAT_MOD_Yf_TILED:
2065 return 1 * 1024 * 1024;
2066 default:
2067 MISSING_CASE(fb->modifier);
2068 return 0;
2069 }
2070 }
2071
2072 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073 {
2074 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
2077 return INTEL_GEN(dev_priv) < 4 ||
2078 (plane->has_fbc &&
2079 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2080 }
2081
2082 struct i915_vma *
2083 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2084 const struct i915_ggtt_view *view,
2085 bool uses_fence,
2086 unsigned long *out_flags)
2087 {
2088 struct drm_device *dev = fb->dev;
2089 struct drm_i915_private *dev_priv = to_i915(dev);
2090 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2091 intel_wakeref_t wakeref;
2092 struct i915_vma *vma;
2093 unsigned int pinctl;
2094 u32 alignment;
2095
2096 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2097
2098 alignment = intel_surf_alignment(fb, 0);
2099
2100 /* Note that the w/a also requires 64 PTEs of padding following the
2101 * bo. We currently fill all unused PTEs with the shadow page and so
2102 * we should always have valid PTEs following the scanout, preventing
2103 * the VT-d warning.
2104 */
2105 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2106 alignment = 256 * 1024;
2107
2108 /*
2109 * Global gtt pte registers are special registers which actually forward
2110 * writes to a chunk of system memory, which means that there is no risk
2111 * that the register values disappear as soon as we call
2112 * intel_runtime_pm_put(), so it is correct to wrap only the
2113 * pin/unpin/fence and not more.
2114 */
2115 wakeref = intel_runtime_pm_get(dev_priv);
2116 i915_gem_object_lock(obj);
2117
2118 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2119
2120 pinctl = 0;
2121
2122 /* Valleyview is definitely limited to scanning out the first
2123 * 512MiB. Let's presume this behaviour was inherited from the
2124 * g4x display engine and that all earlier gens are similarly
2125 * limited. Testing suggests that it is a little more
2126 * complicated than this. For example, Cherryview appears quite
2127 * happy to scan out from anywhere within its global aperture.
2128 */
2129 if (HAS_GMCH(dev_priv))
2130 pinctl |= PIN_MAPPABLE;
2131
2132 vma = i915_gem_object_pin_to_display_plane(obj,
2133 alignment, view, pinctl);
2134 if (IS_ERR(vma))
2135 goto err;
2136
2137 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2138 int ret;
2139
2140 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2141 * fence, whereas 965+ only requires a fence if using
2142 * framebuffer compression. For simplicity, we always, when
2143 * possible, install a fence as the cost is not that onerous.
2144 *
2145 * If we fail to fence the tiled scanout, then either the
2146 * modeset will reject the change (which is highly unlikely as
2147 * the affected systems, all but one, do not have unmappable
2148 * space) or we will not be able to enable full powersaving
2149 * techniques (also likely not to apply due to various limits
2150 * FBC and the like impose on the size of the buffer, which
2151 * presumably we violated anyway with this unmappable buffer).
2152 * Anyway, it is presumably better to stumble onwards with
2153 * something and try to run the system in a "less than optimal"
2154 * mode that matches the user configuration.
2155 */
2156 ret = i915_vma_pin_fence(vma);
2157 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2158 i915_gem_object_unpin_from_display_plane(vma);
2159 vma = ERR_PTR(ret);
2160 goto err;
2161 }
2162
2163 if (ret == 0 && vma->fence)
2164 *out_flags |= PLANE_HAS_FENCE;
2165 }
2166
2167 i915_vma_get(vma);
2168 err:
2169 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2170
2171 i915_gem_object_unlock(obj);
2172 intel_runtime_pm_put(dev_priv, wakeref);
2173 return vma;
2174 }
2175
2176 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2177 {
2178 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2179
2180 i915_gem_object_lock(vma->obj);
2181 if (flags & PLANE_HAS_FENCE)
2182 i915_vma_unpin_fence(vma);
2183 i915_gem_object_unpin_from_display_plane(vma);
2184 i915_gem_object_unlock(vma->obj);
2185
2186 i915_vma_put(vma);
2187 }
2188
2189 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2190 unsigned int rotation)
2191 {
2192 if (drm_rotation_90_or_270(rotation))
2193 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2194 else
2195 return fb->pitches[color_plane];
2196 }
2197
2198 /*
2199 * Convert the x/y offsets into a linear offset.
2200 * Only valid with 0/180 degree rotation, which is fine since linear
2201 * offset is only used with linear buffers on pre-hsw and tiled buffers
2202 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
2203 */
2204 u32 intel_fb_xy_to_linear(int x, int y,
2205 const struct intel_plane_state *state,
2206 int color_plane)
2207 {
2208 const struct drm_framebuffer *fb = state->base.fb;
2209 unsigned int cpp = fb->format->cpp[color_plane];
2210 unsigned int pitch = state->color_plane[color_plane].stride;
2211
2212 return y * pitch + x * cpp;
2213 }
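/*
 * Worked example for intel_fb_xy_to_linear() (illustrative values
 * only): with cpp = 4 and an 8192-byte stride, the pixel at
 * x = 100, y = 50 lands at 50 * 8192 + 100 * 4 = 410000 bytes from
 * the start of the fb.
 */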
2214
2215 /*
2216 * Add the x/y offsets derived from fb->offsets[] to the user
2217 * specified plane src x/y offsets. The resulting x/y offsets
2218 * specify the start of scanout from the beginning of the gtt mapping.
2219 */
2220 void intel_add_fb_offsets(int *x, int *y,
2221 const struct intel_plane_state *state,
2222 int color_plane)
2223
2224 {
2225 *x += state->color_plane[color_plane].x;
2226 *y += state->color_plane[color_plane].y;
2227 }
2228
2229 static u32 intel_adjust_tile_offset(int *x, int *y,
2230 unsigned int tile_width,
2231 unsigned int tile_height,
2232 unsigned int tile_size,
2233 unsigned int pitch_tiles,
2234 u32 old_offset,
2235 u32 new_offset)
2236 {
2237 unsigned int pitch_pixels = pitch_tiles * tile_width;
2238 unsigned int tiles;
2239
2240 WARN_ON(old_offset & (tile_size - 1));
2241 WARN_ON(new_offset & (tile_size - 1));
2242 WARN_ON(new_offset > old_offset);
2243
2244 tiles = (old_offset - new_offset) / tile_size;
2245
2246 *y += tiles / pitch_tiles * tile_height;
2247 *x += tiles % pitch_tiles * tile_width;
2248
2249 /* minimize x in case it got needlessly big */
2250 *y += *x / pitch_pixels * tile_height;
2251 *x %= pitch_pixels;
2252
2253 return new_offset;
2254 }
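/*
 * Worked example (illustrative numbers, not derived from any real
 * platform limits): take gen4+ X-tiling with 512-byte x 8-row tiles
 * and cpp = 4, i.e. tile_width = 128 pixels, and pitch_tiles = 8.
 * Rebasing from old_offset = 3 * tile_size to new_offset = 0 gives
 * tiles = 3, so y += 3 / 8 * 8 = 0 and x += 3 % 8 * 128 = 384;
 * 384 is still below the 8 * 128 = 1024 pixel row width, so the
 * minimizing step leaves x/y untouched.
 */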
2255
2256 static bool is_surface_linear(u64 modifier, int color_plane)
2257 {
2258 return modifier == DRM_FORMAT_MOD_LINEAR;
2259 }
2260
2261 static u32 intel_adjust_aligned_offset(int *x, int *y,
2262 const struct drm_framebuffer *fb,
2263 int color_plane,
2264 unsigned int rotation,
2265 unsigned int pitch,
2266 u32 old_offset, u32 new_offset)
2267 {
2268 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2269 unsigned int cpp = fb->format->cpp[color_plane];
2270
2271 WARN_ON(new_offset > old_offset);
2272
2273 if (!is_surface_linear(fb->modifier, color_plane)) {
2274 unsigned int tile_size, tile_width, tile_height;
2275 unsigned int pitch_tiles;
2276
2277 tile_size = intel_tile_size(dev_priv);
2278 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2279
2280 if (drm_rotation_90_or_270(rotation)) {
2281 pitch_tiles = pitch / tile_height;
2282 swap(tile_width, tile_height);
2283 } else {
2284 pitch_tiles = pitch / (tile_width * cpp);
2285 }
2286
2287 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2288 tile_size, pitch_tiles,
2289 old_offset, new_offset);
2290 } else {
2291 old_offset += *y * pitch + *x * cpp;
2292
2293 *y = (old_offset - new_offset) / pitch;
2294 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2295 }
2296
2297 return new_offset;
2298 }
2299
2300 /*
2301 * Adjust the tile offset by moving the difference into
2302 * the x/y offsets.
2303 */
2304 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2305 const struct intel_plane_state *state,
2306 int color_plane,
2307 u32 old_offset, u32 new_offset)
2308 {
2309 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2310 state->base.rotation,
2311 state->color_plane[color_plane].stride,
2312 old_offset, new_offset);
2313 }
2314
2315 /*
2316 * Computes the aligned offset to the base tile and adjusts
2317 * x, y. bytes per pixel is assumed to be a power-of-two.
2318 *
2319 * In the 90/270 rotated case, x and y are assumed
2320 * to be already rotated to match the rotated GTT view, and
2321 * pitch is the tile_height aligned framebuffer height.
2322 *
2323 * This function is used when computing the derived information
2324 * under intel_framebuffer, so using any of that information
2325 * here is not allowed. Anything under drm_framebuffer can be
2326 * used. This is why the user has to pass in the pitch since it
2327 * is specified in the rotated orientation.
2328 */
2329 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2330 int *x, int *y,
2331 const struct drm_framebuffer *fb,
2332 int color_plane,
2333 unsigned int pitch,
2334 unsigned int rotation,
2335 u32 alignment)
2336 {
2337 unsigned int cpp = fb->format->cpp[color_plane];
2338 u32 offset, offset_aligned;
2339
2340 if (alignment)
2341 alignment--;
2342
2343 if (!is_surface_linear(fb->modifier, color_plane)) {
2344 unsigned int tile_size, tile_width, tile_height;
2345 unsigned int tile_rows, tiles, pitch_tiles;
2346
2347 tile_size = intel_tile_size(dev_priv);
2348 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2349
2350 if (drm_rotation_90_or_270(rotation)) {
2351 pitch_tiles = pitch / tile_height;
2352 swap(tile_width, tile_height);
2353 } else {
2354 pitch_tiles = pitch / (tile_width * cpp);
2355 }
2356
2357 tile_rows = *y / tile_height;
2358 *y %= tile_height;
2359
2360 tiles = *x / tile_width;
2361 *x %= tile_width;
2362
2363 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2364 offset_aligned = offset & ~alignment;
2365
2366 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2367 tile_size, pitch_tiles,
2368 offset, offset_aligned);
2369 } else {
2370 offset = *y * pitch + *x * cpp;
2371 offset_aligned = offset & ~alignment;
2372
2373 *y = (offset & alignment) / pitch;
2374 *x = ((offset & alignment) - *y * pitch) / cpp;
2375 }
2376
2377 return offset_aligned;
2378 }
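/*
 * Worked example for the linear branch above (illustrative values):
 * with pitch = 8192, cpp = 4, alignment = 4096 and x = 100, y = 50,
 * the raw offset is 50 * 8192 + 400 = 410000 bytes. Masking with
 * ~(4096 - 1) yields offset_aligned = 409600, and the 400-byte
 * remainder is folded back into the coordinates as y = 400 / 8192 = 0
 * and x = 400 / 4 = 100.
 */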
2379
2380 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2381 const struct intel_plane_state *state,
2382 int color_plane)
2383 {
2384 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2385 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2386 const struct drm_framebuffer *fb = state->base.fb;
2387 unsigned int rotation = state->base.rotation;
2388 int pitch = state->color_plane[color_plane].stride;
2389 u32 alignment;
2390
2391 if (intel_plane->id == PLANE_CURSOR)
2392 alignment = intel_cursor_alignment(dev_priv);
2393 else
2394 alignment = intel_surf_alignment(fb, color_plane);
2395
2396 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2397 pitch, rotation, alignment);
2398 }
2399
2400 /* Convert the fb->offset[] into x/y offsets */
2401 static int intel_fb_offset_to_xy(int *x, int *y,
2402 const struct drm_framebuffer *fb,
2403 int color_plane)
2404 {
2405 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2406 unsigned int height;
2407
2408 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2409 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2410 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2411 fb->offsets[color_plane], color_plane);
2412 return -EINVAL;
2413 }
2414
2415 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2416 height = ALIGN(height, intel_tile_height(fb, color_plane));
2417
2418 /* Catch potential overflows early */
2419 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2420 fb->offsets[color_plane])) {
2421 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2422 fb->offsets[color_plane], fb->pitches[color_plane],
2423 color_plane);
2424 return -ERANGE;
2425 }
2426
2427 *x = 0;
2428 *y = 0;
2429
2430 intel_adjust_aligned_offset(x, y,
2431 fb, color_plane, DRM_MODE_ROTATE_0,
2432 fb->pitches[color_plane],
2433 fb->offsets[color_plane], 0);
2434
2435 return 0;
2436 }
2437
2438 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2439 {
2440 switch (fb_modifier) {
2441 case I915_FORMAT_MOD_X_TILED:
2442 return I915_TILING_X;
2443 case I915_FORMAT_MOD_Y_TILED:
2444 case I915_FORMAT_MOD_Y_TILED_CCS:
2445 return I915_TILING_Y;
2446 default:
2447 return I915_TILING_NONE;
2448 }
2449 }
2450
2451 /*
2452 * From the Sky Lake PRM:
2453 * "The Color Control Surface (CCS) contains the compression status of
2454 * the cache-line pairs. The compression state of the cache-line pair
2455 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2456 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2457 * cache-line-pairs. CCS is always Y tiled."
2458 *
2459 * Since "cache-line pairs" refers to horizontally adjacent cache lines,
2460 * each cache line in the CCS corresponds to an area of 32x16 cache
2461 * lines on the main surface. Since each pixel is 4 bytes, this gives
2462 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2463 * main surface.
2464 */
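/*
 * Spelling that arithmetic out (a derivation from the PRM text, not
 * extra hardware behaviour): one 64-byte CCS cache line holds
 * 64 * 8 / 2 = 256 two-bit entries, i.e. it tracks 256 cache-line
 * pairs = 512 main surface cache lines. At 64 bytes per cache line
 * and 4 bytes per pixel that is 512 * 16 = 8192 pixels per CCS cache
 * line, or 8192 / 64 = 128 = 8x16 pixels per CCS byte -- hence the
 * hsub = 8, vsub = 16 entries in the table below.
 */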
2465 static const struct drm_format_info ccs_formats[] = {
2466 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2467 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2468 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2469 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2470 };
2471
2472 static const struct drm_format_info *
2473 lookup_format_info(const struct drm_format_info formats[],
2474 int num_formats, u32 format)
2475 {
2476 int i;
2477
2478 for (i = 0; i < num_formats; i++) {
2479 if (formats[i].format == format)
2480 return &formats[i];
2481 }
2482
2483 return NULL;
2484 }
2485
2486 static const struct drm_format_info *
2487 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2488 {
2489 switch (cmd->modifier[0]) {
2490 case I915_FORMAT_MOD_Y_TILED_CCS:
2491 case I915_FORMAT_MOD_Yf_TILED_CCS:
2492 return lookup_format_info(ccs_formats,
2493 ARRAY_SIZE(ccs_formats),
2494 cmd->pixel_format);
2495 default:
2496 return NULL;
2497 }
2498 }
2499
2500 bool is_ccs_modifier(u64 modifier)
2501 {
2502 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2503 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2504 }
2505
2506 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2507 u32 pixel_format, u64 modifier)
2508 {
2509 struct intel_crtc *crtc;
2510 struct intel_plane *plane;
2511
2512 /*
2513 * We assume the primary plane for pipe A has
2514 * the highest stride limits of them all.
2515 */
2516 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2517 plane = to_intel_plane(crtc->base.primary);
2518
2519 return plane->max_stride(plane, pixel_format, modifier,
2520 DRM_MODE_ROTATE_0);
2521 }
2522
2523 static
2524 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2525 u32 pixel_format, u64 modifier)
2526 {
2527 /*
2528 * Arbitrary limit for gen4+ chosen to match the
2529 * render engine max stride.
2530 *
2531 * The new CCS hash mode makes remapping impossible.
2532 */
2533 if (!is_ccs_modifier(modifier)) {
2534 if (INTEL_GEN(dev_priv) >= 7)
2535 return 256*1024;
2536 else if (INTEL_GEN(dev_priv) >= 4)
2537 return 128*1024;
2538 }
2539
2540 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2541 }
2542
2543 static u32
2544 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2545 {
2546 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2547
2548 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2549 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2550 fb->format->format,
2551 fb->modifier);
2552
2553 /*
2554 * To make remapping with linear generally feasible
2555 * we need the stride to be page aligned.
2556 */
2557 if (fb->pitches[color_plane] > max_stride)
2558 return intel_tile_size(dev_priv);
2559 else
2560 return 64;
2561 } else {
2562 return intel_tile_width_bytes(fb, color_plane);
2563 }
2564 }
2565
2566 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2567 {
2568 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2569 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2570 const struct drm_framebuffer *fb = plane_state->base.fb;
2571 int i;
2572
2573 /* We don't want to deal with remapping with cursors */
2574 if (plane->id == PLANE_CURSOR)
2575 return false;
2576
2577 /*
2578 * The display engine limits already match/exceed the
2579 * render engine limits, so not much point in remapping.
2580 * Would also need to deal with the fence POT alignment
2581 * and gen2 2KiB GTT tile size.
2582 */
2583 if (INTEL_GEN(dev_priv) < 4)
2584 return false;
2585
2586 /*
2587 * The new CCS hash mode isn't compatible with remapping as
2588 * the virtual address of the pages affects the compressed data.
2589 */
2590 if (is_ccs_modifier(fb->modifier))
2591 return false;
2592
2593 /* Linear needs a page aligned stride for remapping */
2594 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2595 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2596
2597 for (i = 0; i < fb->format->num_planes; i++) {
2598 if (fb->pitches[i] & alignment)
2599 return false;
2600 }
2601 }
2602
2603 return true;
2604 }
2605
2606 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2607 {
2608 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2609 const struct drm_framebuffer *fb = plane_state->base.fb;
2610 unsigned int rotation = plane_state->base.rotation;
2611 u32 stride, max_stride;
2612
2613 /*
2614 * No remapping for invisible planes since we don't have
2615 * an actual source viewport to remap.
2616 */
2617 if (!plane_state->base.visible)
2618 return false;
2619
2620 if (!intel_plane_can_remap(plane_state))
2621 return false;
2622
2623 /*
2624 * FIXME: aux plane limits on gen9+ are
2625 * unclear in Bspec, for now no checking.
2626 */
2627 stride = intel_fb_pitch(fb, 0, rotation);
2628 max_stride = plane->max_stride(plane, fb->format->format,
2629 fb->modifier, rotation);
2630
2631 return stride > max_stride;
2632 }
2633
2634 static int
2635 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2636 struct drm_framebuffer *fb)
2637 {
2638 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2639 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2640 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2641 u32 gtt_offset_rotated = 0;
2642 unsigned int max_size = 0;
2643 int i, num_planes = fb->format->num_planes;
2644 unsigned int tile_size = intel_tile_size(dev_priv);
2645
2646 for (i = 0; i < num_planes; i++) {
2647 unsigned int width, height;
2648 unsigned int cpp, size;
2649 u32 offset;
2650 int x, y;
2651 int ret;
2652
2653 cpp = fb->format->cpp[i];
2654 width = drm_framebuffer_plane_width(fb->width, fb, i);
2655 height = drm_framebuffer_plane_height(fb->height, fb, i);
2656
2657 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2658 if (ret) {
2659 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2660 i, fb->offsets[i]);
2661 return ret;
2662 }
2663
2664 if (is_ccs_modifier(fb->modifier) && i == 1) {
2665 int hsub = fb->format->hsub;
2666 int vsub = fb->format->vsub;
2667 int tile_width, tile_height;
2668 int main_x, main_y;
2669 int ccs_x, ccs_y;
2670
2671 intel_tile_dims(fb, i, &tile_width, &tile_height);
2672 tile_width *= hsub;
2673 tile_height *= vsub;
2674
2675 ccs_x = (x * hsub) % tile_width;
2676 ccs_y = (y * vsub) % tile_height;
2677 main_x = intel_fb->normal[0].x % tile_width;
2678 main_y = intel_fb->normal[0].y % tile_height;
2679
2680 /*
2681 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2682 * x/y offsets must match between CCS and the main surface.
2683 */
2684 if (main_x != ccs_x || main_y != ccs_y) {
2685 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2686 main_x, main_y,
2687 ccs_x, ccs_y,
2688 intel_fb->normal[0].x,
2689 intel_fb->normal[0].y,
2690 x, y);
2691 return -EINVAL;
2692 }
2693 }
2694
2695 /*
2696 * The fence (if used) is aligned to the start of the object
2697 * so having the framebuffer wrap around across the edge of the
2698 * fenced region doesn't really work. We have no API to configure
2699 * the fence start offset within the object (nor probably could we
2700 * on gen2/3). So it's easier if we just require that the
2701 * fb layout agrees with the fence layout. We already check that the
2702 * fb stride matches the fence stride elsewhere.
2703 */
2704 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2705 (x + width) * cpp > fb->pitches[i]) {
2706 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2707 i, fb->offsets[i]);
2708 return -EINVAL;
2709 }
2710
2711 /*
2712 * First pixel of the framebuffer from
2713 * the start of the normal gtt mapping.
2714 */
2715 intel_fb->normal[i].x = x;
2716 intel_fb->normal[i].y = y;
2717
2718 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2719 fb->pitches[i],
2720 DRM_MODE_ROTATE_0,
2721 tile_size);
2722 offset /= tile_size;
2723
2724 if (!is_surface_linear(fb->modifier, i)) {
2725 unsigned int tile_width, tile_height;
2726 unsigned int pitch_tiles;
2727 struct drm_rect r;
2728
2729 intel_tile_dims(fb, i, &tile_width, &tile_height);
2730
2731 rot_info->plane[i].offset = offset;
2732 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2733 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2734 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2735
2736 intel_fb->rotated[i].pitch =
2737 rot_info->plane[i].height * tile_height;
2738
2739 /* how many tiles does this plane need */
2740 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2741 /*
2742 * If the plane isn't horizontally tile aligned,
2743 * we need one more tile.
2744 */
2745 if (x != 0)
2746 size++;
2747
2748 /* rotate the x/y offsets to match the GTT view */
2749 r.x1 = x;
2750 r.y1 = y;
2751 r.x2 = x + width;
2752 r.y2 = y + height;
2753 drm_rect_rotate(&r,
2754 rot_info->plane[i].width * tile_width,
2755 rot_info->plane[i].height * tile_height,
2756 DRM_MODE_ROTATE_270);
2757 x = r.x1;
2758 y = r.y1;
2759
2760 /* rotate the tile dimensions to match the GTT view */
2761 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2762 swap(tile_width, tile_height);
2763
2764 /*
2765 * We only keep the x/y offsets, so push all of the
2766 * gtt offset into the x/y offsets.
2767 */
2768 intel_adjust_tile_offset(&x, &y,
2769 tile_width, tile_height,
2770 tile_size, pitch_tiles,
2771 gtt_offset_rotated * tile_size, 0);
2772
2773 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2774
2775 /*
2776 * First pixel of the framebuffer from
2777 * the start of the rotated gtt mapping.
2778 */
2779 intel_fb->rotated[i].x = x;
2780 intel_fb->rotated[i].y = y;
2781 } else {
2782 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2783 x * cpp, tile_size);
2784 }
2785
2786 /* how many tiles in total are needed in the bo */
2787 max_size = max(max_size, offset + size);
2788 }
2789
2790 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2791 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2792 mul_u32_u32(max_size, tile_size), obj->base.size);
2793 return -EINVAL;
2794 }
2795
2796 return 0;
2797 }
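/*
 * Worked example for the rotated-view bookkeeping above (illustrative,
 * assuming intra-tile x = y = 0): a 1920x1080 Y-tiled XRGB8888 fb has
 * 32x32-pixel tiles (128 bytes x 32 rows), so rot_info->plane[0] ends
 * up 60 tiles wide by DIV_ROUND_UP(1080, 32) = 34 tiles high, and the
 * rotated pitch becomes 34 * 32 = 1088 pixels -- the tile_height
 * aligned fb height that intel_compute_aligned_offset() expects.
 */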
2798
2799 static void
2800 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2801 {
2802 struct drm_i915_private *dev_priv =
2803 to_i915(plane_state->base.plane->dev);
2804 struct drm_framebuffer *fb = plane_state->base.fb;
2805 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2806 struct intel_rotation_info *info = &plane_state->view.rotated;
2807 unsigned int rotation = plane_state->base.rotation;
2808 int i, num_planes = fb->format->num_planes;
2809 unsigned int tile_size = intel_tile_size(dev_priv);
2810 unsigned int src_x, src_y;
2811 unsigned int src_w, src_h;
2812 u32 gtt_offset = 0;
2813
2814 memset(&plane_state->view, 0, sizeof(plane_state->view));
2815 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2816 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2817
2818 src_x = plane_state->base.src.x1 >> 16;
2819 src_y = plane_state->base.src.y1 >> 16;
2820 src_w = drm_rect_width(&plane_state->base.src) >> 16;
2821 src_h = drm_rect_height(&plane_state->base.src) >> 16;
2822
2823 WARN_ON(is_ccs_modifier(fb->modifier));
2824
2825 /* Make src coordinates relative to the viewport */
2826 drm_rect_translate(&plane_state->base.src,
2827 -(src_x << 16), -(src_y << 16));
2828
2829 /* Rotate src coordinates to match rotated GTT view */
2830 if (drm_rotation_90_or_270(rotation))
2831 drm_rect_rotate(&plane_state->base.src,
2832 src_w << 16, src_h << 16,
2833 DRM_MODE_ROTATE_270);
2834
2835 for (i = 0; i < num_planes; i++) {
2836 unsigned int hsub = i ? fb->format->hsub : 1;
2837 unsigned int vsub = i ? fb->format->vsub : 1;
2838 unsigned int cpp = fb->format->cpp[i];
2839 unsigned int tile_width, tile_height;
2840 unsigned int width, height;
2841 unsigned int pitch_tiles;
2842 unsigned int x, y;
2843 u32 offset;
2844
2845 intel_tile_dims(fb, i, &tile_width, &tile_height);
2846
2847 x = src_x / hsub;
2848 y = src_y / vsub;
2849 width = src_w / hsub;
2850 height = src_h / vsub;
2851
2852 /*
2853 * First pixel of the src viewport from the
2854 * start of the normal gtt mapping.
2855 */
2856 x += intel_fb->normal[i].x;
2857 y += intel_fb->normal[i].y;
2858
2859 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2860 fb, i, fb->pitches[i],
2861 DRM_MODE_ROTATE_0, tile_size);
2862 offset /= tile_size;
2863
2864 info->plane[i].offset = offset;
2865 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2866 tile_width * cpp);
2867 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2868 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2869
2870 if (drm_rotation_90_or_270(rotation)) {
2871 struct drm_rect r;
2872
2873 /* rotate the x/y offsets to match the GTT view */
2874 r.x1 = x;
2875 r.y1 = y;
2876 r.x2 = x + width;
2877 r.y2 = y + height;
2878 drm_rect_rotate(&r,
2879 info->plane[i].width * tile_width,
2880 info->plane[i].height * tile_height,
2881 DRM_MODE_ROTATE_270);
2882 x = r.x1;
2883 y = r.y1;
2884
2885 pitch_tiles = info->plane[i].height;
2886 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2887
2888 /* rotate the tile dimensions to match the GTT view */
2889 swap(tile_width, tile_height);
2890 } else {
2891 pitch_tiles = info->plane[i].width;
2892 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2893 }
2894
2895 /*
2896 * We only keep the x/y offsets, so push all of the
2897 * gtt offset into the x/y offsets.
2898 */
2899 intel_adjust_tile_offset(&x, &y,
2900 tile_width, tile_height,
2901 tile_size, pitch_tiles,
2902 gtt_offset * tile_size, 0);
2903
2904 gtt_offset += info->plane[i].width * info->plane[i].height;
2905
2906 plane_state->color_plane[i].offset = 0;
2907 plane_state->color_plane[i].x = x;
2908 plane_state->color_plane[i].y = y;
2909 }
2910 }
2911
2912 static int
2913 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2914 {
2915 const struct intel_framebuffer *fb =
2916 to_intel_framebuffer(plane_state->base.fb);
2917 unsigned int rotation = plane_state->base.rotation;
2918 int i, num_planes;
2919
2920 if (!fb)
2921 return 0;
2922
2923 num_planes = fb->base.format->num_planes;
2924
2925 if (intel_plane_needs_remap(plane_state)) {
2926 intel_plane_remap_gtt(plane_state);
2927
2928 /*
2929 * Sometimes even remapping can't overcome
2930 * the stride limitations :( This can happen with
2931 * big plane sizes and suitably misaligned
2932 * offsets.
2933 */
2934 return intel_plane_check_stride(plane_state);
2935 }
2936
2937 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2938
2939 for (i = 0; i < num_planes; i++) {
2940 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2941 plane_state->color_plane[i].offset = 0;
2942
2943 if (drm_rotation_90_or_270(rotation)) {
2944 plane_state->color_plane[i].x = fb->rotated[i].x;
2945 plane_state->color_plane[i].y = fb->rotated[i].y;
2946 } else {
2947 plane_state->color_plane[i].x = fb->normal[i].x;
2948 plane_state->color_plane[i].y = fb->normal[i].y;
2949 }
2950 }
2951
2952 /* Rotate src coordinates to match rotated GTT view */
2953 if (drm_rotation_90_or_270(rotation))
2954 drm_rect_rotate(&plane_state->base.src,
2955 fb->base.width << 16, fb->base.height << 16,
2956 DRM_MODE_ROTATE_270);
2957
2958 return intel_plane_check_stride(plane_state);
2959 }
2960
2961 static int i9xx_format_to_fourcc(int format)
2962 {
2963 switch (format) {
2964 case DISPPLANE_8BPP:
2965 return DRM_FORMAT_C8;
2966 case DISPPLANE_BGRX555:
2967 return DRM_FORMAT_XRGB1555;
2968 case DISPPLANE_BGRX565:
2969 return DRM_FORMAT_RGB565;
2970 default:
2971 case DISPPLANE_BGRX888:
2972 return DRM_FORMAT_XRGB8888;
2973 case DISPPLANE_RGBX888:
2974 return DRM_FORMAT_XBGR8888;
2975 case DISPPLANE_BGRX101010:
2976 return DRM_FORMAT_XRGB2101010;
2977 case DISPPLANE_RGBX101010:
2978 return DRM_FORMAT_XBGR2101010;
2979 }
2980 }
2981
2982 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2983 {
2984 switch (format) {
2985 case PLANE_CTL_FORMAT_RGB_565:
2986 return DRM_FORMAT_RGB565;
2987 case PLANE_CTL_FORMAT_NV12:
2988 return DRM_FORMAT_NV12;
2989 case PLANE_CTL_FORMAT_P010:
2990 return DRM_FORMAT_P010;
2991 case PLANE_CTL_FORMAT_P012:
2992 return DRM_FORMAT_P012;
2993 case PLANE_CTL_FORMAT_P016:
2994 return DRM_FORMAT_P016;
2995 case PLANE_CTL_FORMAT_Y210:
2996 return DRM_FORMAT_Y210;
2997 case PLANE_CTL_FORMAT_Y212:
2998 return DRM_FORMAT_Y212;
2999 case PLANE_CTL_FORMAT_Y216:
3000 return DRM_FORMAT_Y216;
3001 case PLANE_CTL_FORMAT_Y410:
3002 return DRM_FORMAT_XVYU2101010;
3003 case PLANE_CTL_FORMAT_Y412:
3004 return DRM_FORMAT_XVYU12_16161616;
3005 case PLANE_CTL_FORMAT_Y416:
3006 return DRM_FORMAT_XVYU16161616;
3007 default:
3008 case PLANE_CTL_FORMAT_XRGB_8888:
3009 if (rgb_order) {
3010 if (alpha)
3011 return DRM_FORMAT_ABGR8888;
3012 else
3013 return DRM_FORMAT_XBGR8888;
3014 } else {
3015 if (alpha)
3016 return DRM_FORMAT_ARGB8888;
3017 else
3018 return DRM_FORMAT_XRGB8888;
3019 }
3020 case PLANE_CTL_FORMAT_XRGB_2101010:
3021 if (rgb_order)
3022 return DRM_FORMAT_XBGR2101010;
3023 else
3024 return DRM_FORMAT_XRGB2101010;
3025 case PLANE_CTL_FORMAT_XRGB_16161616F:
3026 if (rgb_order) {
3027 if (alpha)
3028 return DRM_FORMAT_ABGR16161616F;
3029 else
3030 return DRM_FORMAT_XBGR16161616F;
3031 } else {
3032 if (alpha)
3033 return DRM_FORMAT_ARGB16161616F;
3034 else
3035 return DRM_FORMAT_XRGB16161616F;
3036 }
3037 }
3038 }
3039
3040 static bool
3041 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3042 struct intel_initial_plane_config *plane_config)
3043 {
3044 struct drm_device *dev = crtc->base.dev;
3045 struct drm_i915_private *dev_priv = to_i915(dev);
3046 struct drm_i915_gem_object *obj = NULL;
3047 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3048 struct drm_framebuffer *fb = &plane_config->fb->base;
3049 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3050 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3051 PAGE_SIZE);
3052
3053 size_aligned -= base_aligned;
3054
3055 if (plane_config->size == 0)
3056 return false;
3057
3058 /* If the FB is too big, just don't use it since fbdev is not very
3059 * important and we should probably use that space with FBC or other
3060 * features. */
3061 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3062 return false;
3063
3064 switch (fb->modifier) {
3065 case DRM_FORMAT_MOD_LINEAR:
3066 case I915_FORMAT_MOD_X_TILED:
3067 case I915_FORMAT_MOD_Y_TILED:
3068 break;
3069 default:
3070 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3071 fb->modifier);
3072 return false;
3073 }
3074
3075 mutex_lock(&dev->struct_mutex);
3076 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3077 base_aligned,
3078 base_aligned,
3079 size_aligned);
3080 mutex_unlock(&dev->struct_mutex);
3081 if (!obj)
3082 return false;
3083
3084 switch (plane_config->tiling) {
3085 case I915_TILING_NONE:
3086 break;
3087 case I915_TILING_X:
3088 case I915_TILING_Y:
3089 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3090 break;
3091 default:
3092 MISSING_CASE(plane_config->tiling);
3093 return false;
3094 }
3095
3096 mode_cmd.pixel_format = fb->format->format;
3097 mode_cmd.width = fb->width;
3098 mode_cmd.height = fb->height;
3099 mode_cmd.pitches[0] = fb->pitches[0];
3100 mode_cmd.modifier[0] = fb->modifier;
3101 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3102
3103 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3104 DRM_DEBUG_KMS("intel fb init failed\n");
3105 goto out_unref_obj;
3106 }
3107
3108
3109 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3110 return true;
3111
3112 out_unref_obj:
3113 i915_gem_object_put(obj);
3114 return false;
3115 }
3116
3117 static void
3118 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3119 struct intel_plane_state *plane_state,
3120 bool visible)
3121 {
3122 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3123
3124 plane_state->base.visible = visible;
3125
3126 if (visible)
3127 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3128 else
3129 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3130 }
3131
3132 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3133 {
3134 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3135 struct drm_plane *plane;
3136
3137 /*
3138 * active_planes would alias if multiple "primary" or cursor planes
3139 * have been used on the same (or wrong) pipe. plane_mask uses
3140 * unique ids, hence we can use it to reconstruct active_planes.
3141 */
3142 crtc_state->active_planes = 0;
3143
3144 drm_for_each_plane_mask(plane, &dev_priv->drm,
3145 crtc_state->base.plane_mask)
3146 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3147 }
3148
3149 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3150 struct intel_plane *plane)
3151 {
3152 struct intel_crtc_state *crtc_state =
3153 to_intel_crtc_state(crtc->base.state);
3154 struct intel_plane_state *plane_state =
3155 to_intel_plane_state(plane->base.state);
3156
3157 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3158 plane->base.base.id, plane->base.name,
3159 crtc->base.base.id, crtc->base.name);
3160
3161 intel_set_plane_visible(crtc_state, plane_state, false);
3162 fixup_active_planes(crtc_state);
3163 crtc_state->data_rate[plane->id] = 0;
3164
3165 if (plane->id == PLANE_PRIMARY)
3166 intel_pre_disable_primary_noatomic(&crtc->base);
3167
3168 intel_disable_plane(plane, crtc_state);
3169 }
3170
3171 static void
3172 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3173 struct intel_initial_plane_config *plane_config)
3174 {
3175 struct drm_device *dev = intel_crtc->base.dev;
3176 struct drm_i915_private *dev_priv = to_i915(dev);
3177 struct drm_crtc *c;
3178 struct drm_i915_gem_object *obj;
3179 struct drm_plane *primary = intel_crtc->base.primary;
3180 struct drm_plane_state *plane_state = primary->state;
3181 struct intel_plane *intel_plane = to_intel_plane(primary);
3182 struct intel_plane_state *intel_state =
3183 to_intel_plane_state(plane_state);
3184 struct drm_framebuffer *fb;
3185
3186 if (!plane_config->fb)
3187 return;
3188
3189 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3190 fb = &plane_config->fb->base;
3191 goto valid_fb;
3192 }
3193
3194 kfree(plane_config->fb);
3195
3196 /*
3197 * Failed to alloc the obj, check to see if we should share
3198 * an fb with another CRTC instead
3199 */
3200 for_each_crtc(dev, c) {
3201 struct intel_plane_state *state;
3202
3203 if (c == &intel_crtc->base)
3204 continue;
3205
3206 if (!to_intel_crtc(c)->active)
3207 continue;
3208
3209 state = to_intel_plane_state(c->primary->state);
3210 if (!state->vma)
3211 continue;
3212
3213 if (intel_plane_ggtt_offset(state) == plane_config->base) {
3214 fb = state->base.fb;
3215 drm_framebuffer_get(fb);
3216 goto valid_fb;
3217 }
3218 }
3219
3220 /*
3221 * We've failed to reconstruct the BIOS FB. Current display state
3222 * indicates that the primary plane is visible, but has a NULL FB,
3223 * which will lead to problems later if we don't fix it up. The
3224 * simplest solution is to just disable the primary plane now and
3225 * pretend the BIOS never had it enabled.
3226 */
3227 intel_plane_disable_noatomic(intel_crtc, intel_plane);
3228
3229 return;
3230
3231 valid_fb:
3232 intel_state->base.rotation = plane_config->rotation;
3233 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3234 intel_state->base.rotation);
3235 intel_state->color_plane[0].stride =
3236 intel_fb_pitch(fb, 0, intel_state->base.rotation);
3237
3238 mutex_lock(&dev->struct_mutex);
3239 intel_state->vma =
3240 intel_pin_and_fence_fb_obj(fb,
3241 &intel_state->view,
3242 intel_plane_uses_fence(intel_state),
3243 &intel_state->flags);
3244 mutex_unlock(&dev->struct_mutex);
3245 if (IS_ERR(intel_state->vma)) {
3246 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3247 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3248
3249 intel_state->vma = NULL;
3250 drm_framebuffer_put(fb);
3251 return;
3252 }
3253
3254 obj = intel_fb_obj(fb);
3255 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
3256
3257 plane_state->src_x = 0;
3258 plane_state->src_y = 0;
3259 plane_state->src_w = fb->width << 16;
3260 plane_state->src_h = fb->height << 16;
3261
3262 plane_state->crtc_x = 0;
3263 plane_state->crtc_y = 0;
3264 plane_state->crtc_w = fb->width;
3265 plane_state->crtc_h = fb->height;
3266
3267 intel_state->base.src = drm_plane_state_src(plane_state);
3268 intel_state->base.dst = drm_plane_state_dest(plane_state);
3269
3270 if (i915_gem_object_is_tiled(obj))
3271 dev_priv->preserve_bios_swizzle = true;
3272
3273 plane_state->fb = fb;
3274 plane_state->crtc = &intel_crtc->base;
3275
3276 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3277 &obj->frontbuffer_bits);
3278 }
3279
3280 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3281 int color_plane,
3282 unsigned int rotation)
3283 {
3284 int cpp = fb->format->cpp[color_plane];
3285
3286 switch (fb->modifier) {
3287 case DRM_FORMAT_MOD_LINEAR:
3288 case I915_FORMAT_MOD_X_TILED:
3289 return 4096;
3290 case I915_FORMAT_MOD_Y_TILED_CCS:
3291 case I915_FORMAT_MOD_Yf_TILED_CCS:
3292 /* FIXME AUX plane? */
3293 case I915_FORMAT_MOD_Y_TILED:
3294 case I915_FORMAT_MOD_Yf_TILED:
3295 if (cpp == 8)
3296 return 2048;
3297 else
3298 return 4096;
3299 default:
3300 MISSING_CASE(fb->modifier);
3301 return 2048;
3302 }
3303 }
3304
3305 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3306 int color_plane,
3307 unsigned int rotation)
3308 {
3309 int cpp = fb->format->cpp[color_plane];
3310
3311 switch (fb->modifier) {
3312 case DRM_FORMAT_MOD_LINEAR:
3313 case I915_FORMAT_MOD_X_TILED:
3314 if (cpp == 8)
3315 return 4096;
3316 else
3317 return 5120;
3318 case I915_FORMAT_MOD_Y_TILED_CCS:
3319 case I915_FORMAT_MOD_Yf_TILED_CCS:
3320 /* FIXME AUX plane? */
3321 case I915_FORMAT_MOD_Y_TILED:
3322 case I915_FORMAT_MOD_Yf_TILED:
3323 if (cpp == 8)
3324 return 2048;
3325 else
3326 return 5120;
3327 default:
3328 MISSING_CASE(fb->modifier);
3329 return 2048;
3330 }
3331 }
3332
3333 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3334 int color_plane,
3335 unsigned int rotation)
3336 {
3337 return 5120;
3338 }
3339
3340 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3341 int main_x, int main_y, u32 main_offset)
3342 {
3343 const struct drm_framebuffer *fb = plane_state->base.fb;
3344 int hsub = fb->format->hsub;
3345 int vsub = fb->format->vsub;
3346 int aux_x = plane_state->color_plane[1].x;
3347 int aux_y = plane_state->color_plane[1].y;
3348 u32 aux_offset = plane_state->color_plane[1].offset;
3349 u32 alignment = intel_surf_alignment(fb, 1);
3350
3351 while (aux_offset >= main_offset && aux_y <= main_y) {
3352 int x, y;
3353
3354 if (aux_x == main_x && aux_y == main_y)
3355 break;
3356
3357 if (aux_offset == 0)
3358 break;
3359
3360 x = aux_x / hsub;
3361 y = aux_y / vsub;
3362 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3363 aux_offset, aux_offset - alignment);
3364 aux_x = x * hsub + aux_x % hsub;
3365 aux_y = y * vsub + aux_y % vsub;
3366 }
3367
3368 if (aux_x != main_x || aux_y != main_y)
3369 return false;
3370
3371 plane_state->color_plane[1].offset = aux_offset;
3372 plane_state->color_plane[1].x = aux_x;
3373 plane_state->color_plane[1].y = aux_y;
3374
3375 return true;
3376 }
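/*
 * In other words (a sketch of the search above, not additional logic):
 * the loop steps the AUX offset down one alignment unit at a time,
 * folding each step back into aux_x/aux_y, until the intra-tile x/y
 * of the AUX surface matches the main surface or the offset bottoms
 * out at zero; skl_check_main_surface() then retries with a smaller
 * main surface offset if no match was found.
 */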
3377
3378 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3379 {
3380 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
3381 const struct drm_framebuffer *fb = plane_state->base.fb;
3382 unsigned int rotation = plane_state->base.rotation;
3383 int x = plane_state->base.src.x1 >> 16;
3384 int y = plane_state->base.src.y1 >> 16;
3385 int w = drm_rect_width(&plane_state->base.src) >> 16;
3386 int h = drm_rect_height(&plane_state->base.src) >> 16;
3387 int max_width;
3388 int max_height = 4096;
3389 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3390
3391 if (INTEL_GEN(dev_priv) >= 11)
3392 max_width = icl_max_plane_width(fb, 0, rotation);
3393 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3394 max_width = glk_max_plane_width(fb, 0, rotation);
3395 else
3396 max_width = skl_max_plane_width(fb, 0, rotation);
3397
3398 if (w > max_width || h > max_height) {
3399 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3400 w, h, max_width, max_height);
3401 return -EINVAL;
3402 }
3403
3404 intel_add_fb_offsets(&x, &y, plane_state, 0);
3405 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3406 alignment = intel_surf_alignment(fb, 0);
3407
3408 /*
3409 * AUX surface offset is specified as the distance from the
3410 * main surface offset, and it must be non-negative. Make
3411 * sure that is what we will get.
3412 */
3413 if (offset > aux_offset)
3414 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3415 offset, aux_offset & ~(alignment - 1));
3416
3417 /*
3418 * When using an X-tiled surface, the plane blows up
3419 * if the x offset + width exceed the stride.
3420 *
3421 * TODO: linear and Y-tiled seem fine, Yf untested.
3422 */
3423 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3424 int cpp = fb->format->cpp[0];
3425
3426 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3427 if (offset == 0) {
3428 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3429 return -EINVAL;
3430 }
3431
3432 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3433 offset, offset - alignment);
3434 }
3435 }
3436
3437 /*
3438 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3439 * they match with the main surface x/y offsets.
3440 */
3441 if (is_ccs_modifier(fb->modifier)) {
3442 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3443 if (offset == 0)
3444 break;
3445
3446 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3447 offset, offset - alignment);
3448 }
3449
3450 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3451 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3452 return -EINVAL;
3453 }
3454 }
3455
3456 plane_state->color_plane[0].offset = offset;
3457 plane_state->color_plane[0].x = x;
3458 plane_state->color_plane[0].y = y;
3459
3460 /*
3461 * Put the final coordinates back so that the src
3462 * coordinate checks will see the right values.
3463 */
3464 drm_rect_translate(&plane_state->base.src,
3465 (x << 16) - plane_state->base.src.x1,
3466 (y << 16) - plane_state->base.src.y1);
3467
3468 return 0;
3469 }
3470
3471 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3472 {
3473 const struct drm_framebuffer *fb = plane_state->base.fb;
3474 unsigned int rotation = plane_state->base.rotation;
3475 int max_width = skl_max_plane_width(fb, 1, rotation);
3476 int max_height = 4096;
3477 int x = plane_state->base.src.x1 >> 17;
3478 int y = plane_state->base.src.y1 >> 17;
3479 int w = drm_rect_width(&plane_state->base.src) >> 17;
3480 int h = drm_rect_height(&plane_state->base.src) >> 17;
3481 u32 offset;
3482
3483 intel_add_fb_offsets(&x, &y, plane_state, 1);
3484 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3485
3486 /* FIXME not quite sure how/if these apply to the chroma plane */
3487 if (w > max_width || h > max_height) {
3488 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3489 w, h, max_width, max_height);
3490 return -EINVAL;
3491 }
3492
3493 plane_state->color_plane[1].offset = offset;
3494 plane_state->color_plane[1].x = x;
3495 plane_state->color_plane[1].y = y;
3496
3497 return 0;
3498 }
3499
3500 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3501 {
3502 const struct drm_framebuffer *fb = plane_state->base.fb;
3503 int src_x = plane_state->base.src.x1 >> 16;
3504 int src_y = plane_state->base.src.y1 >> 16;
3505 int hsub = fb->format->hsub;
3506 int vsub = fb->format->vsub;
3507 int x = src_x / hsub;
3508 int y = src_y / vsub;
3509 u32 offset;
3510
3511 intel_add_fb_offsets(&x, &y, plane_state, 1);
3512 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3513
3514 plane_state->color_plane[1].offset = offset;
3515 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3516 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3517
3518 return 0;
3519 }
3520
3521 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3522 {
3523 const struct drm_framebuffer *fb = plane_state->base.fb;
3524 int ret;
3525
3526 ret = intel_plane_compute_gtt(plane_state);
3527 if (ret)
3528 return ret;
3529
3530 if (!plane_state->base.visible)
3531 return 0;
3532
3533 /*
3534 * Handle the AUX surface first since
3535 * the main surface setup depends on it.
3536 */
3537 if (is_planar_yuv_format(fb->format->format)) {
3538 ret = skl_check_nv12_aux_surface(plane_state);
3539 if (ret)
3540 return ret;
3541 } else if (is_ccs_modifier(fb->modifier)) {
3542 ret = skl_check_ccs_aux_surface(plane_state);
3543 if (ret)
3544 return ret;
3545 } else {
3546 plane_state->color_plane[1].offset = ~0xfff;
3547 plane_state->color_plane[1].x = 0;
3548 plane_state->color_plane[1].y = 0;
3549 }
3550
3551 ret = skl_check_main_surface(plane_state);
3552 if (ret)
3553 return ret;
3554
3555 return 0;
3556 }
3557
3558 unsigned int
3559 i9xx_plane_max_stride(struct intel_plane *plane,
3560 u32 pixel_format, u64 modifier,
3561 unsigned int rotation)
3562 {
3563 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3564
3565 if (!HAS_GMCH(dev_priv)) {
3566 return 32*1024;
3567 } else if (INTEL_GEN(dev_priv) >= 4) {
3568 if (modifier == I915_FORMAT_MOD_X_TILED)
3569 return 16*1024;
3570 else
3571 return 32*1024;
3572 } else if (INTEL_GEN(dev_priv) >= 3) {
3573 if (modifier == I915_FORMAT_MOD_X_TILED)
3574 return 8*1024;
3575 else
3576 return 16*1024;
3577 } else {
3578 if (plane->i9xx_plane == PLANE_C)
3579 return 4*1024;
3580 else
3581 return 8*1024;
3582 }
3583 }
3584
3585 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3586 {
3587 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3588 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3589 u32 dspcntr = 0;
3590
3591 if (crtc_state->gamma_enable)
3592 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3593
3594 if (crtc_state->csc_enable)
3595 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3596
3597 if (INTEL_GEN(dev_priv) < 5)
3598 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3599
3600 return dspcntr;
3601 }
3602
3603 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3604 const struct intel_plane_state *plane_state)
3605 {
3606 struct drm_i915_private *dev_priv =
3607 to_i915(plane_state->base.plane->dev);
3608 const struct drm_framebuffer *fb = plane_state->base.fb;
3609 unsigned int rotation = plane_state->base.rotation;
3610 u32 dspcntr;
3611
3612 dspcntr = DISPLAY_PLANE_ENABLE;
3613
3614 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3615 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3616 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3617
3618 switch (fb->format->format) {
3619 case DRM_FORMAT_C8:
3620 dspcntr |= DISPPLANE_8BPP;
3621 break;
3622 case DRM_FORMAT_XRGB1555:
3623 dspcntr |= DISPPLANE_BGRX555;
3624 break;
3625 case DRM_FORMAT_RGB565:
3626 dspcntr |= DISPPLANE_BGRX565;
3627 break;
3628 case DRM_FORMAT_XRGB8888:
3629 dspcntr |= DISPPLANE_BGRX888;
3630 break;
3631 case DRM_FORMAT_XBGR8888:
3632 dspcntr |= DISPPLANE_RGBX888;
3633 break;
3634 case DRM_FORMAT_XRGB2101010:
3635 dspcntr |= DISPPLANE_BGRX101010;
3636 break;
3637 case DRM_FORMAT_XBGR2101010:
3638 dspcntr |= DISPPLANE_RGBX101010;
3639 break;
3640 default:
3641 MISSING_CASE(fb->format->format);
3642 return 0;
3643 }
3644
3645 if (INTEL_GEN(dev_priv) >= 4 &&
3646 fb->modifier == I915_FORMAT_MOD_X_TILED)
3647 dspcntr |= DISPPLANE_TILED;
3648
3649 if (rotation & DRM_MODE_ROTATE_180)
3650 dspcntr |= DISPPLANE_ROTATE_180;
3651
3652 if (rotation & DRM_MODE_REFLECT_X)
3653 dspcntr |= DISPPLANE_MIRROR;
3654
3655 return dspcntr;
3656 }
3657
3658 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3659 {
3660 struct drm_i915_private *dev_priv =
3661 to_i915(plane_state->base.plane->dev);
3662 int src_x, src_y;
3663 u32 offset;
3664 int ret;
3665
3666 ret = intel_plane_compute_gtt(plane_state);
3667 if (ret)
3668 return ret;
3669
3670 if (!plane_state->base.visible)
3671 return 0;
3672
3673 src_x = plane_state->base.src.x1 >> 16;
3674 src_y = plane_state->base.src.y1 >> 16;
3675
3676 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3677
3678 if (INTEL_GEN(dev_priv) >= 4)
3679 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3680 plane_state, 0);
3681 else
3682 offset = 0;
3683
3684 /*
3685 * Put the final coordinates back so that the src
3686 * coordinate checks will see the right values.
3687 */
3688 drm_rect_translate(&plane_state->base.src,
3689 (src_x << 16) - plane_state->base.src.x1,
3690 (src_y << 16) - plane_state->base.src.y1);
3691
3692 /* HSW/BDW do this automagically in hardware */
3693 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3694 unsigned int rotation = plane_state->base.rotation;
3695 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3696 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3697
3698 if (rotation & DRM_MODE_ROTATE_180) {
3699 src_x += src_w - 1;
3700 src_y += src_h - 1;
3701 } else if (rotation & DRM_MODE_REFLECT_X) {
3702 src_x += src_w - 1;
3703 }
3704 }
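
/*
 * As a concrete example: a 180 degree rotated 1920x1080 source at
 * (0, 0) is scanned out backwards by the hardware, so the coordinates
 * programmed below must point at the bottom-right pixel, (1919, 1079).
 */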
3705
3706 plane_state->color_plane[0].offset = offset;
3707 plane_state->color_plane[0].x = src_x;
3708 plane_state->color_plane[0].y = src_y;
3709
3710 return 0;
3711 }
3712
3713 static int
3714 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3715 struct intel_plane_state *plane_state)
3716 {
3717 int ret;
3718
3719 ret = chv_plane_check_rotation(plane_state);
3720 if (ret)
3721 return ret;
3722
3723 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3724 &crtc_state->base,
3725 DRM_PLANE_HELPER_NO_SCALING,
3726 DRM_PLANE_HELPER_NO_SCALING,
3727 false, true);
3728 if (ret)
3729 return ret;
3730
3731 ret = i9xx_check_plane_surface(plane_state);
3732 if (ret)
3733 return ret;
3734
3735 if (!plane_state->base.visible)
3736 return 0;
3737
3738 ret = intel_plane_check_src_coordinates(plane_state);
3739 if (ret)
3740 return ret;
3741
3742 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3743
3744 return 0;
3745 }
3746
3747 static void i9xx_update_plane(struct intel_plane *plane,
3748 const struct intel_crtc_state *crtc_state,
3749 const struct intel_plane_state *plane_state)
3750 {
3751 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3752 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3753 u32 linear_offset;
3754 int x = plane_state->color_plane[0].x;
3755 int y = plane_state->color_plane[0].y;
3756 unsigned long irqflags;
3757 u32 dspaddr_offset;
3758 u32 dspcntr;
3759
3760 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3761
3762 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3763
3764 if (INTEL_GEN(dev_priv) >= 4)
3765 dspaddr_offset = plane_state->color_plane[0].offset;
3766 else
3767 dspaddr_offset = linear_offset;
3768
3769 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3770
3771 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3772
3773 if (INTEL_GEN(dev_priv) < 4) {
3774 /* pipesrc and dspsize control the size that is scaled from,
3775 * which should always be the user's requested size.
3776 */
3777 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3778 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3779 ((crtc_state->pipe_src_h - 1) << 16) |
3780 (crtc_state->pipe_src_w - 1));
3781 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3782 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3783 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3784 ((crtc_state->pipe_src_h - 1) << 16) |
3785 (crtc_state->pipe_src_w - 1));
3786 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3787 }
3788
3789 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3790 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3791 } else if (INTEL_GEN(dev_priv) >= 4) {
3792 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3793 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3794 }
3795
3796 /*
3797 * The control register self-arms if the plane was previously
3798 * disabled. Try to make the plane enable atomic by writing
3799 * the control register just before the surface register.
3800 */
3801 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3802 if (INTEL_GEN(dev_priv) >= 4)
3803 I915_WRITE_FW(DSPSURF(i9xx_plane),
3804 intel_plane_ggtt_offset(plane_state) +
3805 dspaddr_offset);
3806 else
3807 I915_WRITE_FW(DSPADDR(i9xx_plane),
3808 intel_plane_ggtt_offset(plane_state) +
3809 dspaddr_offset);
3810
3811 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3812 }
3813
3814 static void i9xx_disable_plane(struct intel_plane *plane,
3815 const struct intel_crtc_state *crtc_state)
3816 {
3817 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3818 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3819 unsigned long irqflags;
3820 u32 dspcntr;
3821
3822 /*
3823 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3824 * enable on ilk+ affect the pipe bottom color as
3825 * well, so we must configure them even if the plane
3826 * is disabled.
3827 *
3828 * On pre-g4x there is no way to gamma correct the
3829 * pipe bottom color but we'll keep on doing this
3830 * anyway so that the crtc state readout works correctly.
3831 */
3832 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3833
3834 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3835
3836 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3837 if (INTEL_GEN(dev_priv) >= 4)
3838 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3839 else
3840 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3841
3842 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3843 }
3844
3845 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3846 enum pipe *pipe)
3847 {
3848 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3849 enum intel_display_power_domain power_domain;
3850 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3851 intel_wakeref_t wakeref;
3852 bool ret;
3853 u32 val;
3854
3855 /*
3856 * Not 100% correct for planes that can move between pipes,
3857 * but that's only the case for gen2-4 which don't have any
3858 * display power wells.
3859 */
3860 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3861 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3862 if (!wakeref)
3863 return false;
3864
3865 val = I915_READ(DSPCNTR(i9xx_plane));
3866
3867 ret = val & DISPLAY_PLANE_ENABLE;
3868
3869 if (INTEL_GEN(dev_priv) >= 5)
3870 *pipe = plane->pipe;
3871 else
3872 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3873 DISPPLANE_SEL_PIPE_SHIFT;
3874
3875 intel_display_power_put(dev_priv, power_domain, wakeref);
3876
3877 return ret;
3878 }
3879
3880 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3881 {
3882 struct drm_device *dev = intel_crtc->base.dev;
3883 struct drm_i915_private *dev_priv = to_i915(dev);
3884
3885 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3886 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3887 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3888 }
3889
3890 /*
3891 * This function detaches (aka. unbinds) unused scalers in hardware
3892 */
3893 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3894 {
3895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3896 const struct intel_crtc_scaler_state *scaler_state =
3897 &crtc_state->scaler_state;
3898 int i;
3899
3900 /* loop through and disable scalers that aren't in use */
3901 for (i = 0; i < intel_crtc->num_scalers; i++) {
3902 if (!scaler_state->scalers[i].in_use)
3903 skl_detach_scaler(intel_crtc, i);
3904 }
3905 }
3906
3907 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3908 int color_plane, unsigned int rotation)
3909 {
3910 /*
3911 * The stride is expressed either as a multiple of 64-byte chunks for
3912 * linear buffers, or as a number of tiles for tiled buffers.
3913 */
3914 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3915 return 64;
3916 else if (drm_rotation_90_or_270(rotation))
3917 return intel_tile_height(fb, color_plane);
3918 else
3919 return intel_tile_width_bytes(fb, color_plane);
3920 }
3921
3922 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3923 int color_plane)
3924 {
3925 const struct drm_framebuffer *fb = plane_state->base.fb;
3926 unsigned int rotation = plane_state->base.rotation;
3927 u32 stride = plane_state->color_plane[color_plane].stride;
3928
3929 if (color_plane >= fb->format->num_planes)
3930 return 0;
3931
3932 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3933 }
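
/*
 * A sketch of the units involved, assuming the 512-byte-wide X tile
 * used on these platforms: a linear XRGB8888 fb 1920 pixels wide has a
 * byte stride of 1920 * 4 = 7680, which skl_plane_stride() converts to
 * 7680 / 64 = 120 chunks; the same fb X-tiled is programmed as
 * 7680 / 512 = 15 tiles.
 */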
3934
3935 static u32 skl_plane_ctl_format(u32 pixel_format)
3936 {
3937 switch (pixel_format) {
3938 case DRM_FORMAT_C8:
3939 return PLANE_CTL_FORMAT_INDEXED;
3940 case DRM_FORMAT_RGB565:
3941 return PLANE_CTL_FORMAT_RGB_565;
3942 case DRM_FORMAT_XBGR8888:
3943 case DRM_FORMAT_ABGR8888:
3944 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3945 case DRM_FORMAT_XRGB8888:
3946 case DRM_FORMAT_ARGB8888:
3947 return PLANE_CTL_FORMAT_XRGB_8888;
3948 case DRM_FORMAT_XRGB2101010:
3949 return PLANE_CTL_FORMAT_XRGB_2101010;
3950 case DRM_FORMAT_XBGR2101010:
3951 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3952 case DRM_FORMAT_XBGR16161616F:
3953 case DRM_FORMAT_ABGR16161616F:
3954 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3955 case DRM_FORMAT_XRGB16161616F:
3956 case DRM_FORMAT_ARGB16161616F:
3957 return PLANE_CTL_FORMAT_XRGB_16161616F;
3958 case DRM_FORMAT_YUYV:
3959 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3960 case DRM_FORMAT_YVYU:
3961 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3962 case DRM_FORMAT_UYVY:
3963 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3964 case DRM_FORMAT_VYUY:
3965 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3966 case DRM_FORMAT_NV12:
3967 return PLANE_CTL_FORMAT_NV12;
3968 case DRM_FORMAT_P010:
3969 return PLANE_CTL_FORMAT_P010;
3970 case DRM_FORMAT_P012:
3971 return PLANE_CTL_FORMAT_P012;
3972 case DRM_FORMAT_P016:
3973 return PLANE_CTL_FORMAT_P016;
3974 case DRM_FORMAT_Y210:
3975 return PLANE_CTL_FORMAT_Y210;
3976 case DRM_FORMAT_Y212:
3977 return PLANE_CTL_FORMAT_Y212;
3978 case DRM_FORMAT_Y216:
3979 return PLANE_CTL_FORMAT_Y216;
3980 case DRM_FORMAT_XVYU2101010:
3981 return PLANE_CTL_FORMAT_Y410;
3982 case DRM_FORMAT_XVYU12_16161616:
3983 return PLANE_CTL_FORMAT_Y412;
3984 case DRM_FORMAT_XVYU16161616:
3985 return PLANE_CTL_FORMAT_Y416;
3986 default:
3987 MISSING_CASE(pixel_format);
3988 }
3989
3990 return 0;
3991 }
3992
3993 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3994 {
3995 if (!plane_state->base.fb->format->has_alpha)
3996 return PLANE_CTL_ALPHA_DISABLE;
3997
3998 switch (plane_state->base.pixel_blend_mode) {
3999 case DRM_MODE_BLEND_PIXEL_NONE:
4000 return PLANE_CTL_ALPHA_DISABLE;
4001 case DRM_MODE_BLEND_PREMULTI:
4002 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4003 case DRM_MODE_BLEND_COVERAGE:
4004 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4005 default:
4006 MISSING_CASE(plane_state->base.pixel_blend_mode);
4007 return PLANE_CTL_ALPHA_DISABLE;
4008 }
4009 }
4010
4011 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4012 {
4013 if (!plane_state->base.fb->format->has_alpha)
4014 return PLANE_COLOR_ALPHA_DISABLE;
4015
4016 switch (plane_state->base.pixel_blend_mode) {
4017 case DRM_MODE_BLEND_PIXEL_NONE:
4018 return PLANE_COLOR_ALPHA_DISABLE;
4019 case DRM_MODE_BLEND_PREMULTI:
4020 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4021 case DRM_MODE_BLEND_COVERAGE:
4022 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4023 default:
4024 MISSING_CASE(plane_state->base.pixel_blend_mode);
4025 return PLANE_COLOR_ALPHA_DISABLE;
4026 }
4027 }
4028
4029 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4030 {
4031 switch (fb_modifier) {
4032 case DRM_FORMAT_MOD_LINEAR:
4033 break;
4034 case I915_FORMAT_MOD_X_TILED:
4035 return PLANE_CTL_TILED_X;
4036 case I915_FORMAT_MOD_Y_TILED:
4037 return PLANE_CTL_TILED_Y;
4038 case I915_FORMAT_MOD_Y_TILED_CCS:
4039 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4040 case I915_FORMAT_MOD_Yf_TILED:
4041 return PLANE_CTL_TILED_YF;
4042 case I915_FORMAT_MOD_Yf_TILED_CCS:
4043 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4044 default:
4045 MISSING_CASE(fb_modifier);
4046 }
4047
4048 return 0;
4049 }
4050
4051 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4052 {
4053 switch (rotate) {
4054 case DRM_MODE_ROTATE_0:
4055 break;
4056 /*
4057 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
4058 * while i915 HW rotation is clockwise; that's why we swap them here.
4059 */
4060 case DRM_MODE_ROTATE_90:
4061 return PLANE_CTL_ROTATE_270;
4062 case DRM_MODE_ROTATE_180:
4063 return PLANE_CTL_ROTATE_180;
4064 case DRM_MODE_ROTATE_270:
4065 return PLANE_CTL_ROTATE_90;
4066 default:
4067 MISSING_CASE(rotate);
4068 }
4069
4070 return 0;
4071 }
4072
4073 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4074 {
4075 switch (reflect) {
4076 case 0:
4077 break;
4078 case DRM_MODE_REFLECT_X:
4079 return PLANE_CTL_FLIP_HORIZONTAL;
4080 case DRM_MODE_REFLECT_Y:
4081 default:
4082 MISSING_CASE(reflect);
4083 }
4084
4085 return 0;
4086 }
4087
4088 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4089 {
4090 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4091 u32 plane_ctl = 0;
4092
4093 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4094 return plane_ctl;
4095
4096 if (crtc_state->gamma_enable)
4097 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4098
4099 if (crtc_state->csc_enable)
4100 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4101
4102 return plane_ctl;
4103 }
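
/*
 * Note the early return above: on GLK and gen10 the pipe gamma/CSC
 * enables live in PLANE_COLOR_CTL instead (see glk_plane_color_ctl_crtc()
 * below), and on gen11+ neither helper sets them.
 */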
4104
4105 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4106 const struct intel_plane_state *plane_state)
4107 {
4108 struct drm_i915_private *dev_priv =
4109 to_i915(plane_state->base.plane->dev);
4110 const struct drm_framebuffer *fb = plane_state->base.fb;
4111 unsigned int rotation = plane_state->base.rotation;
4112 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4113 u32 plane_ctl;
4114
4115 plane_ctl = PLANE_CTL_ENABLE;
4116
4117 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4118 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4119 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4120
4121 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4122 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4123
4124 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4125 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4126 }
4127
4128 plane_ctl |= skl_plane_ctl_format(fb->format->format);
4129 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4130 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4131
4132 if (INTEL_GEN(dev_priv) >= 10)
4133 plane_ctl |= cnl_plane_ctl_flip(rotation &
4134 DRM_MODE_REFLECT_MASK);
4135
4136 if (key->flags & I915_SET_COLORKEY_DESTINATION)
4137 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4138 else if (key->flags & I915_SET_COLORKEY_SOURCE)
4139 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4140
4141 return plane_ctl;
4142 }
4143
4144 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4145 {
4146 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4147 u32 plane_color_ctl = 0;
4148
4149 if (INTEL_GEN(dev_priv) >= 11)
4150 return plane_color_ctl;
4151
4152 if (crtc_state->gamma_enable)
4153 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4154
4155 if (crtc_state->csc_enable)
4156 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4157
4158 return plane_color_ctl;
4159 }
4160
4161 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4162 const struct intel_plane_state *plane_state)
4163 {
4164 struct drm_i915_private *dev_priv =
4165 to_i915(plane_state->base.plane->dev);
4166 const struct drm_framebuffer *fb = plane_state->base.fb;
4167 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4168 u32 plane_color_ctl = 0;
4169
4170 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4171 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4172
4173 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4174 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4175 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4176 else
4177 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4178
4179 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4180 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4181 } else if (fb->format->is_yuv) {
4182 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4183 }
4184
4185 return plane_color_ctl;
4186 }
4187
4188 static int
4189 __intel_display_resume(struct drm_device *dev,
4190 struct drm_atomic_state *state,
4191 struct drm_modeset_acquire_ctx *ctx)
4192 {
4193 struct drm_crtc_state *crtc_state;
4194 struct drm_crtc *crtc;
4195 int i, ret;
4196
4197 intel_modeset_setup_hw_state(dev, ctx);
4198 i915_redisable_vga(to_i915(dev));
4199
4200 if (!state)
4201 return 0;
4202
4203 /*
4204 * We've duplicated the state, pointers to the old state are invalid.
4205 *
4206 * Don't attempt to use the old state until we commit the duplicated state.
4207 */
4208 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4209 /*
4210 * Force recalculation even if we restore
4211 * current state. With fast modeset this may not result
4212 * in a modeset when the state is compatible.
4213 */
4214 crtc_state->mode_changed = true;
4215 }
4216
4217 /* ignore any reset values/BIOS leftovers in the WM registers */
4218 if (!HAS_GMCH(to_i915(dev)))
4219 to_intel_atomic_state(state)->skip_intermediate_wm = true;
4220
4221 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4222
4223 WARN_ON(ret == -EDEADLK);
4224 return ret;
4225 }
4226
4227 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4228 {
4229 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4230 intel_has_gpu_reset(dev_priv));
4231 }
4232
4233 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4234 {
4235 struct drm_device *dev = &dev_priv->drm;
4236 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4237 struct drm_atomic_state *state;
4238 int ret;
4239
4240 /* reset doesn't touch the display */
4241 if (!i915_modparams.force_reset_modeset_test &&
4242 !gpu_reset_clobbers_display(dev_priv))
4243 return;
4244
4245 /* We have a modeset vs reset deadlock, defensively unbreak it. */
4246 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4247 wake_up_all(&dev_priv->gpu_error.wait_queue);
4248
4249 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4250 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4251 i915_gem_set_wedged(dev_priv);
4252 }
4253
4254 /*
4255 * Need mode_config.mutex so that we don't
4256 * trample ongoing ->detect() and whatnot.
4257 */
4258 mutex_lock(&dev->mode_config.mutex);
4259 drm_modeset_acquire_init(ctx, 0);
4260 while (1) {
4261 ret = drm_modeset_lock_all_ctx(dev, ctx);
4262 if (ret != -EDEADLK)
4263 break;
4264
4265 drm_modeset_backoff(ctx);
4266 }
4267 /*
4268 * Disabling the crtcs gracefully seems nicer. Also the
4269 * g33 docs say we should at least disable all the planes.
4270 */
4271 state = drm_atomic_helper_duplicate_state(dev, ctx);
4272 if (IS_ERR(state)) {
4273 ret = PTR_ERR(state);
4274 DRM_ERROR("Duplicating state failed with %i\n", ret);
4275 return;
4276 }
4277
4278 ret = drm_atomic_helper_disable_all(dev, ctx);
4279 if (ret) {
4280 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
4281 drm_atomic_state_put(state);
4282 return;
4283 }
4284
4285 dev_priv->modeset_restore_state = state;
4286 state->acquire_ctx = ctx;
4287 }
4288
4289 void intel_finish_reset(struct drm_i915_private *dev_priv)
4290 {
4291 struct drm_device *dev = &dev_priv->drm;
4292 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4293 struct drm_atomic_state *state;
4294 int ret;
4295
4296 /* reset doesn't touch the display */
4297 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
4298 return;
4299
4300 state = fetch_and_zero(&dev_priv->modeset_restore_state);
4301 if (!state)
4302 goto unlock;
4303
4304 /* reset doesn't touch the display */
4305 if (!gpu_reset_clobbers_display(dev_priv)) {
4306 /* for testing only restore the display */
4307 ret = __intel_display_resume(dev, state, ctx);
4308 if (ret)
4309 DRM_ERROR("Restoring old state failed with %i\n", ret);
4310 } else {
4311 /*
4312 * The display has been reset as well,
4313 * so need a full re-initialization.
4314 */
4315 intel_pps_unlock_regs_wa(dev_priv);
4316 intel_modeset_init_hw(dev);
4317 intel_init_clock_gating(dev_priv);
4318
4319 spin_lock_irq(&dev_priv->irq_lock);
4320 if (dev_priv->display.hpd_irq_setup)
4321 dev_priv->display.hpd_irq_setup(dev_priv);
4322 spin_unlock_irq(&dev_priv->irq_lock);
4323
4324 ret = __intel_display_resume(dev, state, ctx);
4325 if (ret)
4326 DRM_ERROR("Restoring old state failed with %i\n", ret);
4327
4328 intel_hpd_init(dev_priv);
4329 }
4330
4331 drm_atomic_state_put(state);
4332 unlock:
4333 drm_modeset_drop_locks(ctx);
4334 drm_modeset_acquire_fini(ctx);
4335 mutex_unlock(&dev->mode_config.mutex);
4336
4337 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4338 }
4339
4340 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4341 {
4342 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4343 enum pipe pipe = crtc->pipe;
4344 u32 tmp;
4345
4346 tmp = I915_READ(PIPE_CHICKEN(pipe));
4347
4348 /*
4349 * Display WA #1153: icl
4350 * enable hardware to bypass the alpha math
4351 * and rounding for per-pixel values 00 and 0xff
4352 */
4353 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4354 /*
4355 * Display WA #1605353570: icl
4356 * Set the pixel rounding bit to 1 to allow
4357 * passthrough of framebuffer pixels unmodified
4358 * across the pipe
4359 */
4360 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4361 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4362 }
4363
4364 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4365 const struct intel_crtc_state *new_crtc_state)
4366 {
4367 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
4368 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4369
4370 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
4371 crtc->base.mode = new_crtc_state->base.mode;
4372
4373 /*
4374 * Update pipe size and adjust fitter if needed: the reason for this is
4375 * that in compute_mode_changes we check the native mode (not the pfit
4376 * mode) to see if we can flip rather than do a full mode set. In the
4377 * fastboot case, we'll flip, but if we don't update the pipesrc and
4378 * pfit state, we'll end up with a big fb scanned out into the wrong
4379 * sized surface.
4380 */
4381
4382 I915_WRITE(PIPESRC(crtc->pipe),
4383 ((new_crtc_state->pipe_src_w - 1) << 16) |
4384 (new_crtc_state->pipe_src_h - 1));
4385
4386 /* on skylake this is done by detaching scalers */
4387 if (INTEL_GEN(dev_priv) >= 9) {
4388 skl_detach_scalers(new_crtc_state);
4389
4390 if (new_crtc_state->pch_pfit.enabled)
4391 skylake_pfit_enable(new_crtc_state);
4392 } else if (HAS_PCH_SPLIT(dev_priv)) {
4393 if (new_crtc_state->pch_pfit.enabled)
4394 ironlake_pfit_enable(new_crtc_state);
4395 else if (old_crtc_state->pch_pfit.enabled)
4396 ironlake_pfit_disable(old_crtc_state);
4397 }
4398
4399 if (INTEL_GEN(dev_priv) >= 11)
4400 icl_set_pipe_chicken(crtc);
4401 }
4402
4403 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4404 {
4405 struct drm_device *dev = crtc->base.dev;
4406 struct drm_i915_private *dev_priv = to_i915(dev);
4407 int pipe = crtc->pipe;
4408 i915_reg_t reg;
4409 u32 temp;
4410
4411 /* enable normal train */
4412 reg = FDI_TX_CTL(pipe);
4413 temp = I915_READ(reg);
4414 if (IS_IVYBRIDGE(dev_priv)) {
4415 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4416 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4417 } else {
4418 temp &= ~FDI_LINK_TRAIN_NONE;
4419 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4420 }
4421 I915_WRITE(reg, temp);
4422
4423 reg = FDI_RX_CTL(pipe);
4424 temp = I915_READ(reg);
4425 if (HAS_PCH_CPT(dev_priv)) {
4426 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4427 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4428 } else {
4429 temp &= ~FDI_LINK_TRAIN_NONE;
4430 temp |= FDI_LINK_TRAIN_NONE;
4431 }
4432 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4433
4434 /* wait one idle pattern time */
4435 POSTING_READ(reg);
4436 udelay(1000);
4437
4438 /* IVB wants error correction enabled */
4439 if (IS_IVYBRIDGE(dev_priv))
4440 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4441 FDI_FE_ERRC_ENABLE);
4442 }
4443
4444 /* The FDI link training functions for ILK/Ibexpeak. */
4445 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4446 const struct intel_crtc_state *crtc_state)
4447 {
4448 struct drm_device *dev = crtc->base.dev;
4449 struct drm_i915_private *dev_priv = to_i915(dev);
4450 int pipe = crtc->pipe;
4451 i915_reg_t reg;
4452 u32 temp, tries;
4453
4454 /* FDI needs bits from pipe first */
4455 assert_pipe_enabled(dev_priv, pipe);
4456
4457 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4458 for the train result */
4459 reg = FDI_RX_IMR(pipe);
4460 temp = I915_READ(reg);
4461 temp &= ~FDI_RX_SYMBOL_LOCK;
4462 temp &= ~FDI_RX_BIT_LOCK;
4463 I915_WRITE(reg, temp);
4464 I915_READ(reg);
4465 udelay(150);
4466
4467 /* enable CPU FDI TX and PCH FDI RX */
4468 reg = FDI_TX_CTL(pipe);
4469 temp = I915_READ(reg);
4470 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4471 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4472 temp &= ~FDI_LINK_TRAIN_NONE;
4473 temp |= FDI_LINK_TRAIN_PATTERN_1;
4474 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4475
4476 reg = FDI_RX_CTL(pipe);
4477 temp = I915_READ(reg);
4478 temp &= ~FDI_LINK_TRAIN_NONE;
4479 temp |= FDI_LINK_TRAIN_PATTERN_1;
4480 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4481
4482 POSTING_READ(reg);
4483 udelay(150);
4484
4485 /* Ironlake workaround, enable clock pointer after FDI enable */
4486 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4487 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4488 FDI_RX_PHASE_SYNC_POINTER_EN);
4489
4490 reg = FDI_RX_IIR(pipe);
4491 for (tries = 0; tries < 5; tries++) {
4492 temp = I915_READ(reg);
4493 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4494
4495 if ((temp & FDI_RX_BIT_LOCK)) {
4496 DRM_DEBUG_KMS("FDI train 1 done.\n");
4497 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4498 break;
4499 }
4500 }
4501 if (tries == 5)
4502 DRM_ERROR("FDI train 1 fail!\n");
4503
4504 /* Train 2 */
4505 reg = FDI_TX_CTL(pipe);
4506 temp = I915_READ(reg);
4507 temp &= ~FDI_LINK_TRAIN_NONE;
4508 temp |= FDI_LINK_TRAIN_PATTERN_2;
4509 I915_WRITE(reg, temp);
4510
4511 reg = FDI_RX_CTL(pipe);
4512 temp = I915_READ(reg);
4513 temp &= ~FDI_LINK_TRAIN_NONE;
4514 temp |= FDI_LINK_TRAIN_PATTERN_2;
4515 I915_WRITE(reg, temp);
4516
4517 POSTING_READ(reg);
4518 udelay(150);
4519
4520 reg = FDI_RX_IIR(pipe);
4521 for (tries = 0; tries < 5; tries++) {
4522 temp = I915_READ(reg);
4523 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4524
4525 if (temp & FDI_RX_SYMBOL_LOCK) {
4526 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4527 DRM_DEBUG_KMS("FDI train 2 done.\n");
4528 break;
4529 }
4530 }
4531 if (tries == 5)
4532 DRM_ERROR("FDI train 2 fail!\n");
4533
4534 DRM_DEBUG_KMS("FDI train done\n");
4535
4536 }
4537
4538 static const int snb_b_fdi_train_param[] = {
4539 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4540 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4541 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4542 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4543 };
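
/*
 * Voltage-swing/pre-emphasis combinations, tried in order by the
 * SNB/IVB training loops below until the receiver reports bit and
 * symbol lock.
 */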
4544
4545 /* The FDI link training functions for SNB/Cougarpoint. */
4546 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4547 const struct intel_crtc_state *crtc_state)
4548 {
4549 struct drm_device *dev = crtc->base.dev;
4550 struct drm_i915_private *dev_priv = to_i915(dev);
4551 int pipe = crtc->pipe;
4552 i915_reg_t reg;
4553 u32 temp, i, retry;
4554
4555 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4556 for the train result */
4557 reg = FDI_RX_IMR(pipe);
4558 temp = I915_READ(reg);
4559 temp &= ~FDI_RX_SYMBOL_LOCK;
4560 temp &= ~FDI_RX_BIT_LOCK;
4561 I915_WRITE(reg, temp);
4562
4563 POSTING_READ(reg);
4564 udelay(150);
4565
4566 /* enable CPU FDI TX and PCH FDI RX */
4567 reg = FDI_TX_CTL(pipe);
4568 temp = I915_READ(reg);
4569 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4570 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4571 temp &= ~FDI_LINK_TRAIN_NONE;
4572 temp |= FDI_LINK_TRAIN_PATTERN_1;
4573 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4574 /* SNB-B */
4575 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4576 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4577
4578 I915_WRITE(FDI_RX_MISC(pipe),
4579 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4580
4581 reg = FDI_RX_CTL(pipe);
4582 temp = I915_READ(reg);
4583 if (HAS_PCH_CPT(dev_priv)) {
4584 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4585 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4586 } else {
4587 temp &= ~FDI_LINK_TRAIN_NONE;
4588 temp |= FDI_LINK_TRAIN_PATTERN_1;
4589 }
4590 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4591
4592 POSTING_READ(reg);
4593 udelay(150);
4594
4595 for (i = 0; i < 4; i++) {
4596 reg = FDI_TX_CTL(pipe);
4597 temp = I915_READ(reg);
4598 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4599 temp |= snb_b_fdi_train_param[i];
4600 I915_WRITE(reg, temp);
4601
4602 POSTING_READ(reg);
4603 udelay(500);
4604
4605 for (retry = 0; retry < 5; retry++) {
4606 reg = FDI_RX_IIR(pipe);
4607 temp = I915_READ(reg);
4608 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4609 if (temp & FDI_RX_BIT_LOCK) {
4610 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4611 DRM_DEBUG_KMS("FDI train 1 done.\n");
4612 break;
4613 }
4614 udelay(50);
4615 }
4616 if (retry < 5)
4617 break;
4618 }
4619 if (i == 4)
4620 DRM_ERROR("FDI train 1 fail!\n");
4621
4622 /* Train 2 */
4623 reg = FDI_TX_CTL(pipe);
4624 temp = I915_READ(reg);
4625 temp &= ~FDI_LINK_TRAIN_NONE;
4626 temp |= FDI_LINK_TRAIN_PATTERN_2;
4627 if (IS_GEN(dev_priv, 6)) {
4628 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4629 /* SNB-B */
4630 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4631 }
4632 I915_WRITE(reg, temp);
4633
4634 reg = FDI_RX_CTL(pipe);
4635 temp = I915_READ(reg);
4636 if (HAS_PCH_CPT(dev_priv)) {
4637 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4638 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4639 } else {
4640 temp &= ~FDI_LINK_TRAIN_NONE;
4641 temp |= FDI_LINK_TRAIN_PATTERN_2;
4642 }
4643 I915_WRITE(reg, temp);
4644
4645 POSTING_READ(reg);
4646 udelay(150);
4647
4648 for (i = 0; i < 4; i++) {
4649 reg = FDI_TX_CTL(pipe);
4650 temp = I915_READ(reg);
4651 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4652 temp |= snb_b_fdi_train_param[i];
4653 I915_WRITE(reg, temp);
4654
4655 POSTING_READ(reg);
4656 udelay(500);
4657
4658 for (retry = 0; retry < 5; retry++) {
4659 reg = FDI_RX_IIR(pipe);
4660 temp = I915_READ(reg);
4661 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4662 if (temp & FDI_RX_SYMBOL_LOCK) {
4663 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4664 DRM_DEBUG_KMS("FDI train 2 done.\n");
4665 break;
4666 }
4667 udelay(50);
4668 }
4669 if (retry < 5)
4670 break;
4671 }
4672 if (i == 4)
4673 DRM_ERROR("FDI train 2 fail!\n");
4674
4675 DRM_DEBUG_KMS("FDI train done.\n");
4676 }
4677
4678 /* Manual link training for Ivy Bridge A0 parts */
4679 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4680 const struct intel_crtc_state *crtc_state)
4681 {
4682 struct drm_device *dev = crtc->base.dev;
4683 struct drm_i915_private *dev_priv = to_i915(dev);
4684 int pipe = crtc->pipe;
4685 i915_reg_t reg;
4686 u32 temp, i, j;
4687
4688 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4689 for the train result */
4690 reg = FDI_RX_IMR(pipe);
4691 temp = I915_READ(reg);
4692 temp &= ~FDI_RX_SYMBOL_LOCK;
4693 temp &= ~FDI_RX_BIT_LOCK;
4694 I915_WRITE(reg, temp);
4695
4696 POSTING_READ(reg);
4697 udelay(150);
4698
4699 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4700 I915_READ(FDI_RX_IIR(pipe)));
4701
4702 /* Try each vswing and preemphasis setting twice before moving on */
4703 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4704 /* disable first in case we need to retry */
4705 reg = FDI_TX_CTL(pipe);
4706 temp = I915_READ(reg);
4707 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4708 temp &= ~FDI_TX_ENABLE;
4709 I915_WRITE(reg, temp);
4710
4711 reg = FDI_RX_CTL(pipe);
4712 temp = I915_READ(reg);
4713 temp &= ~FDI_LINK_TRAIN_AUTO;
4714 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4715 temp &= ~FDI_RX_ENABLE;
4716 I915_WRITE(reg, temp);
4717
4718 /* enable CPU FDI TX and PCH FDI RX */
4719 reg = FDI_TX_CTL(pipe);
4720 temp = I915_READ(reg);
4721 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4722 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4723 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4724 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4725 temp |= snb_b_fdi_train_param[j/2];
4726 temp |= FDI_COMPOSITE_SYNC;
4727 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4728
4729 I915_WRITE(FDI_RX_MISC(pipe),
4730 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4731
4732 reg = FDI_RX_CTL(pipe);
4733 temp = I915_READ(reg);
4734 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4735 temp |= FDI_COMPOSITE_SYNC;
4736 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4737
4738 POSTING_READ(reg);
4739 udelay(1); /* should be 0.5us */
4740
4741 for (i = 0; i < 4; i++) {
4742 reg = FDI_RX_IIR(pipe);
4743 temp = I915_READ(reg);
4744 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4745
4746 if (temp & FDI_RX_BIT_LOCK ||
4747 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4748 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4749 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4750 i);
4751 break;
4752 }
4753 udelay(1); /* should be 0.5us */
4754 }
4755 if (i == 4) {
4756 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4757 continue;
4758 }
4759
4760 /* Train 2 */
4761 reg = FDI_TX_CTL(pipe);
4762 temp = I915_READ(reg);
4763 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4764 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4765 I915_WRITE(reg, temp);
4766
4767 reg = FDI_RX_CTL(pipe);
4768 temp = I915_READ(reg);
4769 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4770 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4771 I915_WRITE(reg, temp);
4772
4773 POSTING_READ(reg);
4774 udelay(2); /* should be 1.5us */
4775
4776 for (i = 0; i < 4; i++) {
4777 reg = FDI_RX_IIR(pipe);
4778 temp = I915_READ(reg);
4779 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4780
4781 if (temp & FDI_RX_SYMBOL_LOCK ||
4782 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4783 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4784 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4785 i);
4786 goto train_done;
4787 }
4788 udelay(2); /* should be 1.5us */
4789 }
4790 if (i == 4)
4791 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4792 }
4793
4794 train_done:
4795 DRM_DEBUG_KMS("FDI train done.\n");
4796 }
4797
4798 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4799 {
4800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4801 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4802 int pipe = intel_crtc->pipe;
4803 i915_reg_t reg;
4804 u32 temp;
4805
4806 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4807 reg = FDI_RX_CTL(pipe);
4808 temp = I915_READ(reg);
4809 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4810 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4811 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4812 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4813
4814 POSTING_READ(reg);
4815 udelay(200);
4816
4817 /* Switch from Rawclk to PCDclk */
4818 temp = I915_READ(reg);
4819 I915_WRITE(reg, temp | FDI_PCDCLK);
4820
4821 POSTING_READ(reg);
4822 udelay(200);
4823
4824 /* Enable CPU FDI TX PLL, always on for Ironlake */
4825 reg = FDI_TX_CTL(pipe);
4826 temp = I915_READ(reg);
4827 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4828 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4829
4830 POSTING_READ(reg);
4831 udelay(100);
4832 }
4833 }
4834
4835 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4836 {
4837 struct drm_device *dev = intel_crtc->base.dev;
4838 struct drm_i915_private *dev_priv = to_i915(dev);
4839 int pipe = intel_crtc->pipe;
4840 i915_reg_t reg;
4841 u32 temp;
4842
4843 /* Switch from PCDclk to Rawclk */
4844 reg = FDI_RX_CTL(pipe);
4845 temp = I915_READ(reg);
4846 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4847
4848 /* Disable CPU FDI TX PLL */
4849 reg = FDI_TX_CTL(pipe);
4850 temp = I915_READ(reg);
4851 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4852
4853 POSTING_READ(reg);
4854 udelay(100);
4855
4856 reg = FDI_RX_CTL(pipe);
4857 temp = I915_READ(reg);
4858 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4859
4860 /* Wait for the clocks to turn off. */
4861 POSTING_READ(reg);
4862 udelay(100);
4863 }
4864
4865 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4866 {
4867 struct drm_device *dev = crtc->dev;
4868 struct drm_i915_private *dev_priv = to_i915(dev);
4869 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4870 int pipe = intel_crtc->pipe;
4871 i915_reg_t reg;
4872 u32 temp;
4873
4874 /* disable CPU FDI tx and PCH FDI rx */
4875 reg = FDI_TX_CTL(pipe);
4876 temp = I915_READ(reg);
4877 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4878 POSTING_READ(reg);
4879
4880 reg = FDI_RX_CTL(pipe);
4881 temp = I915_READ(reg);
4882 temp &= ~(0x7 << 16);
4883 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4884 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4885
4886 POSTING_READ(reg);
4887 udelay(100);
4888
4889 /* Ironlake workaround, disable clock pointer after downing FDI */
4890 if (HAS_PCH_IBX(dev_priv))
4891 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4892
4893 /* still set train pattern 1 */
4894 reg = FDI_TX_CTL(pipe);
4895 temp = I915_READ(reg);
4896 temp &= ~FDI_LINK_TRAIN_NONE;
4897 temp |= FDI_LINK_TRAIN_PATTERN_1;
4898 I915_WRITE(reg, temp);
4899
4900 reg = FDI_RX_CTL(pipe);
4901 temp = I915_READ(reg);
4902 if (HAS_PCH_CPT(dev_priv)) {
4903 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4904 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4905 } else {
4906 temp &= ~FDI_LINK_TRAIN_NONE;
4907 temp |= FDI_LINK_TRAIN_PATTERN_1;
4908 }
4909 /* BPC in FDI rx is consistent with that in PIPECONF */
4910 temp &= ~(0x07 << 16);
4911 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4912 I915_WRITE(reg, temp);
4913
4914 POSTING_READ(reg);
4915 udelay(100);
4916 }
4917
4918 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4919 {
4920 struct drm_crtc *crtc;
4921 bool cleanup_done;
4922
4923 drm_for_each_crtc(crtc, &dev_priv->drm) {
4924 struct drm_crtc_commit *commit;
4925 spin_lock(&crtc->commit_lock);
4926 commit = list_first_entry_or_null(&crtc->commit_list,
4927 struct drm_crtc_commit, commit_entry);
4928 cleanup_done = commit ?
4929 try_wait_for_completion(&commit->cleanup_done) : true;
4930 spin_unlock(&crtc->commit_lock);
4931
4932 if (cleanup_done)
4933 continue;
4934
4935 drm_crtc_wait_one_vblank(crtc);
4936
4937 return true;
4938 }
4939
4940 return false;
4941 }
4942
4943 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4944 {
4945 u32 temp;
4946
4947 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4948
4949 mutex_lock(&dev_priv->sb_lock);
4950
4951 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4952 temp |= SBI_SSCCTL_DISABLE;
4953 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4954
4955 mutex_unlock(&dev_priv->sb_lock);
4956 }
4957
4958 /* Program iCLKIP clock to the desired frequency */
4959 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4960 {
4961 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4962 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4963 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4964 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4965 u32 temp;
4966
4967 lpt_disable_iclkip(dev_priv);
4968
4969 /* The iCLK virtual clock root frequency is in MHz,
4970 * but the adjusted_mode->crtc_clock is in kHz. To get the
4971 * divisors, it is necessary to divide one by the other, so we
4972 * convert the virtual clock to kHz here for higher
4973 * precision.
4974 */
4975 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4976 u32 iclk_virtual_root_freq = 172800 * 1000;
4977 u32 iclk_pi_range = 64;
4978 u32 desired_divisor;
4979
4980 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4981 clock << auxdiv);
4982 divsel = (desired_divisor / iclk_pi_range) - 2;
4983 phaseinc = desired_divisor % iclk_pi_range;
4984
4985 /*
4986 * Near 20MHz is a corner case which is
4987 * out of range for the 7-bit divisor
4988 */
4989 if (divsel <= 0x7f)
4990 break;
4991 }
4992
4993 /* This should not happen with any sane values */
4994 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4995 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4996 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4997 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4998
4999 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5000 clock,
5001 auxdiv,
5002 divsel,
5003 phasedir,
5004 phaseinc);
5005
5006 mutex_lock(&dev_priv->sb_lock);
5007
5008 /* Program SSCDIVINTPHASE6 */
5009 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5010 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5011 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5012 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5013 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5014 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5015 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5016 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5017
5018 /* Program SSCAUXDIV */
5019 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5020 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5021 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5022 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5023
5024 /* Enable modulator and associated divider */
5025 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5026 temp &= ~SBI_SSCCTL_DISABLE;
5027 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5028
5029 mutex_unlock(&dev_priv->sb_lock);
5030
5031 /* Wait for initialization time */
5032 udelay(24);
5033
5034 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5035 }
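
/*
 * A worked example of the divisor math above, for an assumed 108000 kHz
 * clock: with auxdiv = 0, desired_divisor = DIV_ROUND_CLOSEST(172800000,
 * 108000) = 1600, so divsel = 1600 / 64 - 2 = 23 and phaseinc =
 * 1600 % 64 = 0. divsel fits in 7 bits, so the auxdiv = 0 iteration is
 * used. lpt_get_iclkip() below inverts this: (23 + 2) * 64 + 0 = 1600,
 * and 172800000 / 1600 recovers 108000 kHz exactly.
 */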
5036
5037 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5038 {
5039 u32 divsel, phaseinc, auxdiv;
5040 u32 iclk_virtual_root_freq = 172800 * 1000;
5041 u32 iclk_pi_range = 64;
5042 u32 desired_divisor;
5043 u32 temp;
5044
5045 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5046 return 0;
5047
5048 mutex_lock(&dev_priv->sb_lock);
5049
5050 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5051 if (temp & SBI_SSCCTL_DISABLE) {
5052 mutex_unlock(&dev_priv->sb_lock);
5053 return 0;
5054 }
5055
5056 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5057 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5058 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5059 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5060 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5061
5062 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5063 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5064 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5065
5066 mutex_unlock(&dev_priv->sb_lock);
5067
5068 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5069
5070 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5071 desired_divisor << auxdiv);
5072 }
5073
5074 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5075 enum pipe pch_transcoder)
5076 {
5077 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5078 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5079 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5080
5081 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5082 I915_READ(HTOTAL(cpu_transcoder)));
5083 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5084 I915_READ(HBLANK(cpu_transcoder)));
5085 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5086 I915_READ(HSYNC(cpu_transcoder)));
5087
5088 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5089 I915_READ(VTOTAL(cpu_transcoder)));
5090 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5091 I915_READ(VBLANK(cpu_transcoder)));
5092 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5093 I915_READ(VSYNC(cpu_transcoder)));
5094 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5095 I915_READ(VSYNCSHIFT(cpu_transcoder)));
5096 }
5097
5098 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5099 {
5100 u32 temp;
5101
5102 temp = I915_READ(SOUTH_CHICKEN1);
5103 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5104 return;
5105
5106 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5107 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5108
5109 temp &= ~FDI_BC_BIFURCATION_SELECT;
5110 if (enable)
5111 temp |= FDI_BC_BIFURCATION_SELECT;
5112
5113 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5114 I915_WRITE(SOUTH_CHICKEN1, temp);
5115 POSTING_READ(SOUTH_CHICKEN1);
5116 }
5117
5118 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5119 {
5120 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5122
5123 switch (crtc->pipe) {
5124 case PIPE_A:
5125 break;
5126 case PIPE_B:
5127 if (crtc_state->fdi_lanes > 2)
5128 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5129 else
5130 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5131
5132 break;
5133 case PIPE_C:
5134 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5135
5136 break;
5137 default:
5138 BUG();
5139 }
5140 }
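
/*
 * With bifurcation enabled the FDI B/C lanes are split two-and-two
 * between pipes B and C; hence pipe C always needs it, while pipe B
 * only keeps bifurcation off (all four lanes to itself) when it uses
 * more than two lanes.
 */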
5141
5142 /*
5143 * Finds the encoder associated with the given CRTC. This can only be
5144 * used when we know that the CRTC isn't feeding multiple encoders!
5145 */
5146 static struct intel_encoder *
5147 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5148 const struct intel_crtc_state *crtc_state)
5149 {
5150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5151 const struct drm_connector_state *connector_state;
5152 const struct drm_connector *connector;
5153 struct intel_encoder *encoder = NULL;
5154 int num_encoders = 0;
5155 int i;
5156
5157 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5158 if (connector_state->crtc != &crtc->base)
5159 continue;
5160
5161 encoder = to_intel_encoder(connector_state->best_encoder);
5162 num_encoders++;
5163 }
5164
5165 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5166 num_encoders, pipe_name(crtc->pipe));
5167
5168 return encoder;
5169 }
5170
5171 /*
5172 * Enable PCH resources required for PCH ports:
5173 * - PCH PLLs
5174 * - FDI training & RX/TX
5175 * - update transcoder timings
5176 * - DP transcoding bits
5177 * - transcoder
5178 */
5179 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5180 const struct intel_crtc_state *crtc_state)
5181 {
5182 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5183 struct drm_device *dev = crtc->base.dev;
5184 struct drm_i915_private *dev_priv = to_i915(dev);
5185 int pipe = crtc->pipe;
5186 u32 temp;
5187
5188 assert_pch_transcoder_disabled(dev_priv, pipe);
5189
5190 if (IS_IVYBRIDGE(dev_priv))
5191 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5192
5193 /* Write the TU size bits before fdi link training, so that error
5194 * detection works. */
5195 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5196 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5197
5198 /* For PCH output, training FDI link */
5199 dev_priv->display.fdi_link_train(crtc, crtc_state);
5200
5201 /* We need to program the right clock selection before writing the pixel
5202 * multiplier into the DPLL. */
5203 if (HAS_PCH_CPT(dev_priv)) {
5204 u32 sel;
5205
5206 temp = I915_READ(PCH_DPLL_SEL);
5207 temp |= TRANS_DPLL_ENABLE(pipe);
5208 sel = TRANS_DPLLB_SEL(pipe);
5209 if (crtc_state->shared_dpll ==
5210 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5211 temp |= sel;
5212 else
5213 temp &= ~sel;
5214 I915_WRITE(PCH_DPLL_SEL, temp);
5215 }
5216
5217 /* XXX: PCH PLLs can be enabled any time before we enable the PCH
5218 * transcoder, and we actually should do this to not upset any PCH
5219 * transcoder that already uses the clock when we share it.
5220 *
5221 * Note that enable_shared_dpll tries to do the right thing, but
5222 * get_shared_dpll unconditionally resets the pll - we need that to have
5223 * the right LVDS enable sequence. */
5224 intel_enable_shared_dpll(crtc_state);
5225
5226 /* set transcoder timing, panel must allow it */
5227 assert_panel_unlocked(dev_priv, pipe);
5228 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5229
5230 intel_fdi_normal_train(crtc);
5231
5232 /* For PCH DP, enable TRANS_DP_CTL */
5233 if (HAS_PCH_CPT(dev_priv) &&
5234 intel_crtc_has_dp_encoder(crtc_state)) {
5235 const struct drm_display_mode *adjusted_mode =
5236 &crtc_state->base.adjusted_mode;
5237 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5238 i915_reg_t reg = TRANS_DP_CTL(pipe);
5239 enum port port;
5240
5241 temp = I915_READ(reg);
5242 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5243 TRANS_DP_SYNC_MASK |
5244 TRANS_DP_BPC_MASK);
5245 temp |= TRANS_DP_OUTPUT_ENABLE;
5246 temp |= bpc << 9; /* same format but at 11:9 */
5247
5248 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5249 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5250 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5251 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5252
5253 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5254 WARN_ON(port < PORT_B || port > PORT_D);
5255 temp |= TRANS_DP_PORT_SEL(port);
5256
5257 I915_WRITE(reg, temp);
5258 }
5259
5260 ironlake_enable_pch_transcoder(crtc_state);
5261 }
5262
5263 static void lpt_pch_enable(const struct intel_atomic_state *state,
5264 const struct intel_crtc_state *crtc_state)
5265 {
5266 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5267 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5268 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5269
5270 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5271
5272 lpt_program_iclkip(crtc_state);
5273
5274 /* Set transcoder timing. */
5275 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5276
5277 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5278 }
5279
5280 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
5281 {
5282 struct drm_i915_private *dev_priv = to_i915(dev);
5283 i915_reg_t dslreg = PIPEDSL(pipe);
5284 u32 temp;
5285
5286 temp = I915_READ(dslreg);
5287 udelay(500);
5288 if (wait_for(I915_READ(dslreg) != temp, 5)) {
5289 if (wait_for(I915_READ(dslreg) != temp, 5))
5290 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5291 }
5292 }
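
/*
 * PIPEDSL holds the pipe's current scanline, so the value sampled above
 * should start moving again once the mode set has taken effect; each
 * wait_for() gives the counter 5 ms to change before the pipe is
 * declared stuck.
 */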
5293
5294 /*
5295 * The hardware phase 0.0 refers to the center of the pixel.
5296 * We want to start from the top/left edge which is phase
5297 * -0.5. That matches how the hardware calculates the scaling
5298 * factors (from top-left of the first pixel to bottom-right
5299 * of the last pixel, as opposed to the pixel centers).
5300 *
5301 * For 4:2:0 subsampled chroma planes we obviously have to
5302 * adjust that so that the chroma sample position lands in
5303 * the right spot.
5304 *
5305 * Note that for packed YCbCr 4:2:2 formats there is no way to
5306 * control chroma siting. The hardware simply replicates the
5307 * chroma samples for both of the luma samples, and thus we don't
5308 * actually get the expected MPEG2 chroma siting convention :(
5309 * The same behaviour is observed on pre-SKL platforms as well.
5310 *
5311 * Theory behind the formula (note that we ignore sub-pixel
5312 * source coordinates):
5313 * s = source sample position
5314 * d = destination sample position
5315 *
5316 * Downscaling 4:1:
5317 * -0.5
5318 * | 0.0
5319 * | | 1.5 (initial phase)
5320 * | | |
5321 * v v v
5322 * | s | s | s | s |
5323 * | d |
5324 *
5325 * Upscaling 1:4:
5326 * -0.5
5327 * | -0.375 (initial phase)
5328 * | | 0.0
5329 * | | |
5330 * v v v
5331 * | s |
5332 * | d | d | d | d |
5333 */
5334 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5335 {
5336 int phase = -0x8000;
5337 u16 trip = 0;
5338
5339 if (chroma_cosited)
5340 phase += (sub - 1) * 0x8000 / sub;
5341
5342 phase += scale / (2 * sub);
5343
5344 /*
5345 * Hardware initial phase limited to [-0.5:1.5].
5346 * Since the max hardware scale factor is 3.0, we
5347 * should never actually exceed 1.0 here.
5348 */
5349 WARN_ON(phase < -0x8000 || phase > 0x18000);
5350
5351 if (phase < 0)
5352 phase = 0x10000 + phase;
5353 else
5354 trip = PS_PHASE_TRIP;
5355
5356 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5357 }
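
/*
 * Worked examples, assuming scale is the usual u16.16 fixed point
 * factor and sub = 1 (luma, phase units: 0x10000 = 1.0 pixel): the 4:1
 * downscale in the diagram above has scale = 0x40000, giving phase =
 * -0x8000 + 0x40000 / 2 = 0x18000 = +1.5; the 1:4 upscale has scale =
 * 0x4000, giving phase = -0x8000 + 0x4000 / 2 = -0x6000 = -0.375. Both
 * match the initial phases shown in the diagram.
 */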
5358
5359 #define SKL_MIN_SRC_W 8
5360 #define SKL_MAX_SRC_W 4096
5361 #define SKL_MIN_SRC_H 8
5362 #define SKL_MAX_SRC_H 4096
5363 #define SKL_MIN_DST_W 8
5364 #define SKL_MAX_DST_W 4096
5365 #define SKL_MIN_DST_H 8
5366 #define SKL_MAX_DST_H 4096
5367 #define ICL_MAX_SRC_W 5120
5368 #define ICL_MAX_SRC_H 4096
5369 #define ICL_MAX_DST_W 5120
5370 #define ICL_MAX_DST_H 4096
5371 #define SKL_MIN_YUV_420_SRC_W 16
5372 #define SKL_MIN_YUV_420_SRC_H 16
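
/*
 * For example, a 4096x2160 -> 5120x2880 pipe scale fits within the ICL
 * limits above but would be rejected by the range checks below on SKL,
 * where both source and destination are capped at 4096 per dimension.
 */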
5373
5374 static int
5375 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5376 unsigned int scaler_user, int *scaler_id,
5377 int src_w, int src_h, int dst_w, int dst_h,
5378 const struct drm_format_info *format, bool need_scaler)
5379 {
5380 struct intel_crtc_scaler_state *scaler_state =
5381 &crtc_state->scaler_state;
5382 struct intel_crtc *intel_crtc =
5383 to_intel_crtc(crtc_state->base.crtc);
5384 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5385 const struct drm_display_mode *adjusted_mode =
5386 &crtc_state->base.adjusted_mode;
5387
5388 /*
5389 * Src coordinates are already rotated by 270 degrees for
5390 * the 90/270 degree plane rotation cases (to match the
5391 * GTT mapping), hence no need to account for rotation here.
5392 */
5393 if (src_w != dst_w || src_h != dst_h)
5394 need_scaler = true;
5395
5396 /*
5397 * Scaling/fitting is not supported in IF-ID mode on GEN9+.
5398 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5399 * Once NV12 is enabled, handle it here when allocating a scaler
5400 * for NV12.
5401 */
5402 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5403 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5404 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5405 return -EINVAL;
5406 }
5407
5408 /*
5409 * If the plane is being disabled, the scaler is no longer required, or
5410 * a forced detach was requested, free the scaler bound to this
5411 * plane/crtc; to do this, update crtc->scaler_usage.
5412 *
5413 * Here the scaler state in crtc_state is freed so that the scaler can
5414 * be assigned to another user. The actual register update to free the
5415 * scaler is done in plane/panel-fit programming. For this purpose
5416 * crtc/plane_state->scaler_id isn't reset here.
5417 */
5418 if (force_detach || !need_scaler) {
5419 if (*scaler_id >= 0) {
5420 scaler_state->scaler_users &= ~(1 << scaler_user);
5421 scaler_state->scalers[*scaler_id].in_use = 0;
5422
5423 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5424 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5425 intel_crtc->pipe, scaler_user, *scaler_id,
5426 scaler_state->scaler_users);
5427 *scaler_id = -1;
5428 }
5429 return 0;
5430 }
5431
5432 if (format && is_planar_yuv_format(format->format) &&
5433 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5434 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5435 return -EINVAL;
5436 }
5437
5438 /* range checks */
5439 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5440 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5441 (INTEL_GEN(dev_priv) >= 11 &&
5442 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5443 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5444 (INTEL_GEN(dev_priv) < 11 &&
5445 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5446 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5447 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5448 "size is out of scaler range\n",
5449 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5450 return -EINVAL;
5451 }
5452
5453 /* mark this plane as a scaler user in crtc_state */
5454 scaler_state->scaler_users |= (1 << scaler_user);
5455 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5456 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5457 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5458 scaler_state->scaler_users);
5459
5460 return 0;
5461 }
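/*
 * Editorial sketch (not part of the driver): the range check above as a
 * pure predicate over the limits #defined before skl_update_scaler().
 * Gen >= 11 allows the wider 5120 ICL maximums, older gens the 4096 SKL
 * ones; the minimums are shared, and the 16x16 planar YUV minimum is
 * checked separately above.
 */
#include <stdbool.h>

static bool ex_scaler_in_range(int gen, int src_w, int src_h,
			       int dst_w, int dst_h)
{
	int max_w = gen >= 11 ? 5120 : 4096;	/* ICL_MAX_*_W : SKL_MAX_*_W */
	int max_h = 4096;			/* ICL/SKL_MAX_*_H */

	return src_w >= 8 && src_h >= 8 &&	/* SKL_MIN_SRC_* */
	       dst_w >= 8 && dst_h >= 8 &&	/* SKL_MIN_DST_* */
	       src_w <= max_w && src_h <= max_h &&
	       dst_w <= max_w && dst_h <= max_h;
}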
5462
5463 /**
5464 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5465 *
5466 * @state: crtc state containing the scaler state to update
5467 *
5468 * Return:
5469 * 0 - scaler_usage updated successfully
5470 * error - requested scaling cannot be supported or other error condition
5471 */
5472 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5473 {
5474 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5475 bool need_scaler = false;
5476
5477 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5478 need_scaler = true;
5479
5480 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5481 &state->scaler_state.scaler_id,
5482 state->pipe_src_w, state->pipe_src_h,
5483 adjusted_mode->crtc_hdisplay,
5484 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5485 }
5486
5487 /**
5488 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5489 * @crtc_state: crtc state containing the scaler state to update
5490 * @plane_state: atomic plane state to update
5491 *
5492 * Return:
5493 * 0 - scaler_usage updated successfully
5494 * error - requested scaling cannot be supported or other error condition
5495 */
5496 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5497 struct intel_plane_state *plane_state)
5498 {
5499 struct intel_plane *intel_plane =
5500 to_intel_plane(plane_state->base.plane);
5501 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5502 struct drm_framebuffer *fb = plane_state->base.fb;
5503 int ret;
5504 bool force_detach = !fb || !plane_state->base.visible;
5505 bool need_scaler = false;
5506
5507 /* Pre-gen11 planes, and gen11+ SDR planes, always need a scaler for planar formats. */
5508 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5509 fb && is_planar_yuv_format(fb->format->format))
5510 need_scaler = true;
5511
5512 ret = skl_update_scaler(crtc_state, force_detach,
5513 drm_plane_index(&intel_plane->base),
5514 &plane_state->scaler_id,
5515 drm_rect_width(&plane_state->base.src) >> 16,
5516 drm_rect_height(&plane_state->base.src) >> 16,
5517 drm_rect_width(&plane_state->base.dst),
5518 drm_rect_height(&plane_state->base.dst),
5519 fb ? fb->format : NULL, need_scaler);
5520
5521 if (ret || plane_state->scaler_id < 0)
5522 return ret;
5523
5524 /* check colorkey */
5525 if (plane_state->ckey.flags) {
5526 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5527 intel_plane->base.base.id,
5528 intel_plane->base.name);
5529 return -EINVAL;
5530 }
5531
5532 /* Check src format */
5533 switch (fb->format->format) {
5534 case DRM_FORMAT_RGB565:
5535 case DRM_FORMAT_XBGR8888:
5536 case DRM_FORMAT_XRGB8888:
5537 case DRM_FORMAT_ABGR8888:
5538 case DRM_FORMAT_ARGB8888:
5539 case DRM_FORMAT_XRGB2101010:
5540 case DRM_FORMAT_XBGR2101010:
5541 case DRM_FORMAT_XBGR16161616F:
5542 case DRM_FORMAT_ABGR16161616F:
5543 case DRM_FORMAT_XRGB16161616F:
5544 case DRM_FORMAT_ARGB16161616F:
5545 case DRM_FORMAT_YUYV:
5546 case DRM_FORMAT_YVYU:
5547 case DRM_FORMAT_UYVY:
5548 case DRM_FORMAT_VYUY:
5549 case DRM_FORMAT_NV12:
5550 case DRM_FORMAT_P010:
5551 case DRM_FORMAT_P012:
5552 case DRM_FORMAT_P016:
5553 case DRM_FORMAT_Y210:
5554 case DRM_FORMAT_Y212:
5555 case DRM_FORMAT_Y216:
5556 case DRM_FORMAT_XVYU2101010:
5557 case DRM_FORMAT_XVYU12_16161616:
5558 case DRM_FORMAT_XVYU16161616:
5559 break;
5560 default:
5561 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5562 intel_plane->base.base.id, intel_plane->base.name,
5563 fb->base.id, fb->format->format);
5564 return -EINVAL;
5565 }
5566
5567 return 0;
5568 }
5569
5570 static void skylake_scaler_disable(struct intel_crtc *crtc)
5571 {
5572 int i;
5573
5574 for (i = 0; i < crtc->num_scalers; i++)
5575 skl_detach_scaler(crtc, i);
5576 }
5577
5578 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5579 {
5580 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5581 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5582 enum pipe pipe = crtc->pipe;
5583 const struct intel_crtc_scaler_state *scaler_state =
5584 &crtc_state->scaler_state;
5585
5586 if (crtc_state->pch_pfit.enabled) {
5587 u16 uv_rgb_hphase, uv_rgb_vphase;
5588 int pfit_w, pfit_h, hscale, vscale;
5589 int id;
5590
5591 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5592 return;
5593
5594 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5595 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5596
5597 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5598 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5599
5600 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5601 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5602
5603 id = scaler_state->scaler_id;
5604 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5605 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5606 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5607 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5608 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5609 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5610 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5611 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5612 }
5613 }
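/*
 * Editorial sketch (not part of the driver): pch_pfit.size packs the
 * fitter window width into the high 16 bits and the height into the low
 * 16, and the scale factors are src/dst in 16.16 fixed point. For
 * example, fitting a 1920-wide source into a 1280-wide window gives
 * 1920 * 65536 / 1280 = 0x18000, i.e. 1.5.
 */
#include <stdint.h>

static void ex_pfit_scales(uint32_t pfit_size, int pipe_src_w,
			   int pipe_src_h, int *hscale, int *vscale)
{
	int pfit_w = (pfit_size >> 16) & 0xffff;
	int pfit_h = pfit_size & 0xffff;

	*hscale = (pipe_src_w << 16) / pfit_w;	/* 16.16: src/dst */
	*vscale = (pipe_src_h << 16) / pfit_h;
}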
5614
5615 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5616 {
5617 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5618 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5619 int pipe = crtc->pipe;
5620
5621 if (crtc_state->pch_pfit.enabled) {
5622 /* Force use of hard-coded filter coefficients
5623 * as some pre-programmed values are broken,
5624 * e.g. x201.
5625 */
5626 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5627 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5628 PF_PIPE_SEL_IVB(pipe));
5629 else
5630 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5631 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5632 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5633 }
5634 }
5635
5636 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5637 {
5638 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5639 struct drm_device *dev = crtc->base.dev;
5640 struct drm_i915_private *dev_priv = to_i915(dev);
5641
5642 if (!crtc_state->ips_enabled)
5643 return;
5644
5645 /*
5646 * We can only enable IPS after we enable a plane and wait for a vblank.
5647 * This function is called from post_plane_update, which is run after
5648 * a vblank wait.
5649 */
5650 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5651
5652 if (IS_BROADWELL(dev_priv)) {
5653 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5654 IPS_ENABLE | IPS_PCODE_CONTROL));
5655 /* Quoting Art Runyan: "it's not safe to expect any particular
5656 * value in IPS_CTL bit 31 after enabling IPS through the
5657 * mailbox." Moreover, the mailbox may return a bogus state,
5658 * so we need to just enable it and continue on.
5659 */
5660 } else {
5661 I915_WRITE(IPS_CTL, IPS_ENABLE);
5662 /* The bit only becomes 1 in the next vblank, so this wait here
5663 * is essentially intel_wait_for_vblank. If we don't have this
5664 * and don't wait for vblanks until the end of crtc_enable, then
5665 * the HW state readout code will complain that the expected
5666 * IPS_CTL value is not the one we read. */
5667 if (intel_wait_for_register(&dev_priv->uncore,
5668 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5669 50))
5670 DRM_ERROR("Timed out waiting for IPS enable\n");
5671 }
5672 }
5673
5674 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5675 {
5676 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5677 struct drm_device *dev = crtc->base.dev;
5678 struct drm_i915_private *dev_priv = to_i915(dev);
5679
5680 if (!crtc_state->ips_enabled)
5681 return;
5682
5683 if (IS_BROADWELL(dev_priv)) {
5684 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5685 /*
5686 * Wait for PCODE to finish disabling IPS. The BSpec-specified
5687 * 42ms timeout value leads to occasional timeouts, so use 100ms
5688 * instead.
5689 */
5690 if (intel_wait_for_register(&dev_priv->uncore,
5691 IPS_CTL, IPS_ENABLE, 0,
5692 100))
5693 DRM_ERROR("Timed out waiting for IPS disable\n");
5694 } else {
5695 I915_WRITE(IPS_CTL, 0);
5696 POSTING_READ(IPS_CTL);
5697 }
5698
5699 /* We need to wait for a vblank before we can disable the plane. */
5700 intel_wait_for_vblank(dev_priv, crtc->pipe);
5701 }
5702
5703 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5704 {
5705 if (intel_crtc->overlay) {
5706 struct drm_device *dev = intel_crtc->base.dev;
5707
5708 mutex_lock(&dev->struct_mutex);
5709 (void) intel_overlay_switch_off(intel_crtc->overlay);
5710 mutex_unlock(&dev->struct_mutex);
5711 }
5712
5713 /* Let userspace switch the overlay on again. In most cases userspace
5714 * has to recompute where to put it anyway.
5715 */
5716 }
5717
5718 /**
5719 * intel_post_enable_primary - Perform operations after enabling primary plane
5720 * @crtc: the CRTC whose primary plane was just enabled
5721 * @new_crtc_state: the enabling state
5722 *
5723 * Performs potentially sleeping operations that must be done after the primary
5724 * plane is enabled, such as updating FBC and IPS. Note that this may be
5725 * called due to an explicit primary plane update, or due to an implicit
5726 * re-enable that is caused when a sprite plane is updated to no longer
5727 * completely hide the primary plane.
5728 */
5729 static void
5730 intel_post_enable_primary(struct drm_crtc *crtc,
5731 const struct intel_crtc_state *new_crtc_state)
5732 {
5733 struct drm_device *dev = crtc->dev;
5734 struct drm_i915_private *dev_priv = to_i915(dev);
5735 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5736 int pipe = intel_crtc->pipe;
5737
5738 /*
5739 * Gen2 reports pipe underruns whenever all planes are disabled.
5740 * So don't enable underrun reporting before at least some planes
5741 * are enabled.
5742 * FIXME: Need to fix the logic to work when we turn off all planes
5743 * but leave the pipe running.
5744 */
5745 if (IS_GEN(dev_priv, 2))
5746 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5747
5748 /* Underruns don't always raise interrupts, so check manually. */
5749 intel_check_cpu_fifo_underruns(dev_priv);
5750 intel_check_pch_fifo_underruns(dev_priv);
5751 }
5752
5753 /* FIXME get rid of this and use pre_plane_update */
5754 static void
5755 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5756 {
5757 struct drm_device *dev = crtc->dev;
5758 struct drm_i915_private *dev_priv = to_i915(dev);
5759 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5760 int pipe = intel_crtc->pipe;
5761
5762 /*
5763 * Gen2 reports pipe underruns whenever all planes are disabled.
5764 * So disable underrun reporting before all the planes get disabled.
5765 */
5766 if (IS_GEN(dev_priv, 2))
5767 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5768
5769 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5770
5771 /*
5772 * Vblank time updates from the shadow to live plane control register
5773 * are blocked if the memory self-refresh mode is active at that
5774 * moment. So to make sure the plane gets truly disabled, first
5775 * disable the self-refresh mode. The self-refresh enable bit in turn
5776 * will be checked/applied by the HW only at the next frame start
5777 * event which is after the vblank start event, so we need to have a
5778 * wait-for-vblank between disabling the plane and the pipe.
5779 */
5780 if (HAS_GMCH(dev_priv) &&
5781 intel_set_memory_cxsr(dev_priv, false))
5782 intel_wait_for_vblank(dev_priv, pipe);
5783 }
5784
5785 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5786 const struct intel_crtc_state *new_crtc_state)
5787 {
5788 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5789 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5790
5791 if (!old_crtc_state->ips_enabled)
5792 return false;
5793
5794 if (needs_modeset(&new_crtc_state->base))
5795 return true;
5796
5797 /*
5798 * Workaround: Do not read or write the pipe palette/gamma data while
5799 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5800 *
5801 * Disable IPS before we program the LUT.
5802 */
5803 if (IS_HASWELL(dev_priv) &&
5804 (new_crtc_state->base.color_mgmt_changed ||
5805 new_crtc_state->update_pipe) &&
5806 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5807 return true;
5808
5809 return !new_crtc_state->ips_enabled;
5810 }
5811
5812 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5813 const struct intel_crtc_state *new_crtc_state)
5814 {
5815 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5816 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5817
5818 if (!new_crtc_state->ips_enabled)
5819 return false;
5820
5821 if (needs_modeset(&new_crtc_state->base))
5822 return true;
5823
5824 /*
5825 * Workaround: Do not read or write the pipe palette/gamma data while
5826 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5827 *
5828 * Re-enable IPS after the LUT has been programmed.
5829 */
5830 if (IS_HASWELL(dev_priv) &&
5831 (new_crtc_state->base.color_mgmt_changed ||
5832 new_crtc_state->update_pipe) &&
5833 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5834 return true;
5835
5836 /*
5837 * We can't read out IPS on broadwell, assume the worst and
5838 * forcibly enable IPS on the first fastset.
5839 */
5840 if (new_crtc_state->update_pipe &&
5841 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5842 return true;
5843
5844 return !old_crtc_state->ips_enabled;
5845 }
5846
5847 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5848 const struct intel_crtc_state *crtc_state)
5849 {
5850 if (!crtc_state->nv12_planes)
5851 return false;
5852
5853 /* WA Display #0827: Gen9:all */
5854 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5855 return true;
5856
5857 return false;
5858 }
5859
5860 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5861 const struct intel_crtc_state *crtc_state)
5862 {
5863 /* Wa_2006604312:icl */
5864 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5865 return true;
5866
5867 return false;
5868 }
5869
5870 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5871 {
5872 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5873 struct drm_device *dev = crtc->base.dev;
5874 struct drm_i915_private *dev_priv = to_i915(dev);
5875 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5876 struct intel_crtc_state *pipe_config =
5877 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5878 crtc);
5879 struct drm_plane *primary = crtc->base.primary;
5880 struct drm_plane_state *old_primary_state =
5881 drm_atomic_get_old_plane_state(old_state, primary);
5882
5883 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5884
5885 if (pipe_config->update_wm_post && pipe_config->base.active)
5886 intel_update_watermarks(crtc);
5887
5888 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5889 hsw_enable_ips(pipe_config);
5890
5891 if (old_primary_state) {
5892 struct drm_plane_state *new_primary_state =
5893 drm_atomic_get_new_plane_state(old_state, primary);
5894
5895 intel_fbc_post_update(crtc);
5896
5897 if (new_primary_state->visible &&
5898 (needs_modeset(&pipe_config->base) ||
5899 !old_primary_state->visible))
5900 intel_post_enable_primary(&crtc->base, pipe_config);
5901 }
5902
5903 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5904 !needs_nv12_wa(dev_priv, pipe_config))
5905 skl_wa_827(dev_priv, crtc->pipe, false);
5906
5907 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5908 !needs_scalerclk_wa(dev_priv, pipe_config))
5909 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5910 }
5911
5912 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5913 struct intel_crtc_state *pipe_config)
5914 {
5915 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5916 struct drm_device *dev = crtc->base.dev;
5917 struct drm_i915_private *dev_priv = to_i915(dev);
5918 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5919 struct drm_plane *primary = crtc->base.primary;
5920 struct drm_plane_state *old_primary_state =
5921 drm_atomic_get_old_plane_state(old_state, primary);
5922 bool modeset = needs_modeset(&pipe_config->base);
5923 struct intel_atomic_state *old_intel_state =
5924 to_intel_atomic_state(old_state);
5925
5926 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5927 hsw_disable_ips(old_crtc_state);
5928
5929 if (old_primary_state) {
5930 struct intel_plane_state *new_primary_state =
5931 intel_atomic_get_new_plane_state(old_intel_state,
5932 to_intel_plane(primary));
5933
5934 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5935 /*
5936 * Gen2 reports pipe underruns whenever all planes are disabled.
5937 * So disable underrun reporting before all the planes get disabled.
5938 */
5939 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5940 (modeset || !new_primary_state->base.visible))
5941 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5942 }
5943
5944 /* Display WA 827 */
5945 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5946 needs_nv12_wa(dev_priv, pipe_config))
5947 skl_wa_827(dev_priv, crtc->pipe, true);
5948
5949 /* Wa_2006604312:icl */
5950 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5951 needs_scalerclk_wa(dev_priv, pipe_config))
5952 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
5953
5954 /*
5955 * Vblank time updates from the shadow to live plane control register
5956 * are blocked if the memory self-refresh mode is active at that
5957 * moment. So to make sure the plane gets truly disabled, first
5958 * disable the self-refresh mode. The self-refresh enable bit in turn
5959 * will be checked/applied by the HW only at the next frame start
5960 * event which is after the vblank start event, so we need to have a
5961 * wait-for-vblank between disabling the plane and the pipe.
5962 */
5963 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5964 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5965 intel_wait_for_vblank(dev_priv, crtc->pipe);
5966
5967 /*
5968 * IVB workaround: must disable low power watermarks for at least
5969 * one frame before enabling scaling. LP watermarks can be re-enabled
5970 * when scaling is disabled.
5971 *
5972 * WaCxSRDisabledForSpriteScaling:ivb
5973 */
5974 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5975 old_crtc_state->base.active)
5976 intel_wait_for_vblank(dev_priv, crtc->pipe);
5977
5978 /*
5979 * If we're doing a modeset, we're done. No need to do any pre-vblank
5980 * watermark programming here.
5981 */
5982 if (needs_modeset(&pipe_config->base))
5983 return;
5984
5985 /*
5986 * For platforms that support atomic watermarks, program the
5987 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5988 * will be the intermediate values that are safe for both pre- and
5989 * post- vblank; when vblank happens, the 'active' values will be set
5990 * to the final 'target' values and we'll do this again to get the
5991 * optimal watermarks. For gen9+ platforms, the values we program here
5992 * will be the final target values which will get automatically latched
5993 * at vblank time; no further programming will be necessary.
5994 *
5995 * If a platform hasn't been transitioned to atomic watermarks yet,
5996 * we'll continue to update watermarks the old way, if flags tell
5997 * us to.
5998 */
5999 if (dev_priv->display.initial_watermarks != NULL)
6000 dev_priv->display.initial_watermarks(old_intel_state,
6001 pipe_config);
6002 else if (pipe_config->update_wm_pre)
6003 intel_update_watermarks(crtc);
6004 }
6005
6006 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6007 struct intel_crtc *crtc)
6008 {
6009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6010 const struct intel_crtc_state *new_crtc_state =
6011 intel_atomic_get_new_crtc_state(state, crtc);
6012 unsigned int update_mask = new_crtc_state->update_planes;
6013 const struct intel_plane_state *old_plane_state;
6014 struct intel_plane *plane;
6015 unsigned int fb_bits = 0;
6016 int i;
6017
6018 intel_crtc_dpms_overlay_disable(crtc);
6019
6020 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6021 if (crtc->pipe != plane->pipe ||
6022 !(update_mask & BIT(plane->id)))
6023 continue;
6024
6025 intel_disable_plane(plane, new_crtc_state);
6026
6027 if (old_plane_state->base.visible)
6028 fb_bits |= plane->frontbuffer_bit;
6029 }
6030
6031 intel_frontbuffer_flip(dev_priv, fb_bits);
6032 }
6033
6034 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
6035 struct intel_crtc_state *crtc_state,
6036 struct drm_atomic_state *old_state)
6037 {
6038 struct drm_connector_state *conn_state;
6039 struct drm_connector *conn;
6040 int i;
6041
6042 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6043 struct intel_encoder *encoder =
6044 to_intel_encoder(conn_state->best_encoder);
6045
6046 if (conn_state->crtc != crtc)
6047 continue;
6048
6049 if (encoder->pre_pll_enable)
6050 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6051 }
6052 }
6053
6054 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
6055 struct intel_crtc_state *crtc_state,
6056 struct drm_atomic_state *old_state)
6057 {
6058 struct drm_connector_state *conn_state;
6059 struct drm_connector *conn;
6060 int i;
6061
6062 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6063 struct intel_encoder *encoder =
6064 to_intel_encoder(conn_state->best_encoder);
6065
6066 if (conn_state->crtc != crtc)
6067 continue;
6068
6069 if (encoder->pre_enable)
6070 encoder->pre_enable(encoder, crtc_state, conn_state);
6071 }
6072 }
6073
6074 static void intel_encoders_enable(struct drm_crtc *crtc,
6075 struct intel_crtc_state *crtc_state,
6076 struct drm_atomic_state *old_state)
6077 {
6078 struct drm_connector_state *conn_state;
6079 struct drm_connector *conn;
6080 int i;
6081
6082 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6083 struct intel_encoder *encoder =
6084 to_intel_encoder(conn_state->best_encoder);
6085
6086 if (conn_state->crtc != crtc)
6087 continue;
6088
6089 if (encoder->enable)
6090 encoder->enable(encoder, crtc_state, conn_state);
6091 intel_opregion_notify_encoder(encoder, true);
6092 }
6093 }
6094
6095 static void intel_encoders_disable(struct drm_crtc *crtc,
6096 struct intel_crtc_state *old_crtc_state,
6097 struct drm_atomic_state *old_state)
6098 {
6099 struct drm_connector_state *old_conn_state;
6100 struct drm_connector *conn;
6101 int i;
6102
6103 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6104 struct intel_encoder *encoder =
6105 to_intel_encoder(old_conn_state->best_encoder);
6106
6107 if (old_conn_state->crtc != crtc)
6108 continue;
6109
6110 intel_opregion_notify_encoder(encoder, false);
6111 if (encoder->disable)
6112 encoder->disable(encoder, old_crtc_state, old_conn_state);
6113 }
6114 }
6115
6116 static void intel_encoders_post_disable(struct drm_crtc *crtc,
6117 struct intel_crtc_state *old_crtc_state,
6118 struct drm_atomic_state *old_state)
6119 {
6120 struct drm_connector_state *old_conn_state;
6121 struct drm_connector *conn;
6122 int i;
6123
6124 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6125 struct intel_encoder *encoder =
6126 to_intel_encoder(old_conn_state->best_encoder);
6127
6128 if (old_conn_state->crtc != crtc)
6129 continue;
6130
6131 if (encoder->post_disable)
6132 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6133 }
6134 }
6135
6136 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
6137 struct intel_crtc_state *old_crtc_state,
6138 struct drm_atomic_state *old_state)
6139 {
6140 struct drm_connector_state *old_conn_state;
6141 struct drm_connector *conn;
6142 int i;
6143
6144 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6145 struct intel_encoder *encoder =
6146 to_intel_encoder(old_conn_state->best_encoder);
6147
6148 if (old_conn_state->crtc != crtc)
6149 continue;
6150
6151 if (encoder->post_pll_disable)
6152 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6153 }
6154 }
6155
6156 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
6157 struct intel_crtc_state *crtc_state,
6158 struct drm_atomic_state *old_state)
6159 {
6160 struct drm_connector_state *conn_state;
6161 struct drm_connector *conn;
6162 int i;
6163
6164 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6165 struct intel_encoder *encoder =
6166 to_intel_encoder(conn_state->best_encoder);
6167
6168 if (conn_state->crtc != crtc)
6169 continue;
6170
6171 if (encoder->update_pipe)
6172 encoder->update_pipe(encoder, crtc_state, conn_state);
6173 }
6174 }
6175
6176 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6177 {
6178 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6179 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6180
6181 plane->disable_plane(plane, crtc_state);
6182 }
6183
6184 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6185 struct drm_atomic_state *old_state)
6186 {
6187 struct drm_crtc *crtc = pipe_config->base.crtc;
6188 struct drm_device *dev = crtc->dev;
6189 struct drm_i915_private *dev_priv = to_i915(dev);
6190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6191 int pipe = intel_crtc->pipe;
6192 struct intel_atomic_state *old_intel_state =
6193 to_intel_atomic_state(old_state);
6194
6195 if (WARN_ON(intel_crtc->active))
6196 return;
6197
6198 /*
6199 * Sometimes spurious CPU pipe underruns happen during FDI
6200 * training, at least with VGA+HDMI cloning. Suppress them.
6201 *
6202 * On ILK we get occasional spurious CPU pipe underruns
6203 * between eDP port A enable and vdd enable. Also PCH port
6204 * enable seems to result in the occasional CPU pipe underrun.
6205 *
6206 * Spurious PCH underruns also occur during PCH enabling.
6207 */
6208 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6209 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6210
6211 if (pipe_config->has_pch_encoder)
6212 intel_prepare_shared_dpll(pipe_config);
6213
6214 if (intel_crtc_has_dp_encoder(pipe_config))
6215 intel_dp_set_m_n(pipe_config, M1_N1);
6216
6217 intel_set_pipe_timings(pipe_config);
6218 intel_set_pipe_src_size(pipe_config);
6219
6220 if (pipe_config->has_pch_encoder) {
6221 intel_cpu_transcoder_set_m_n(pipe_config,
6222 &pipe_config->fdi_m_n, NULL);
6223 }
6224
6225 ironlake_set_pipeconf(pipe_config);
6226
6227 intel_crtc->active = true;
6228
6229 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6230
6231 if (pipe_config->has_pch_encoder) {
6232 /* Note: FDI PLL enabling _must_ be done before we enable the
6233 * cpu pipes, hence this is separate from all the other fdi/pch
6234 * enabling. */
6235 ironlake_fdi_pll_enable(pipe_config);
6236 } else {
6237 assert_fdi_tx_disabled(dev_priv, pipe);
6238 assert_fdi_rx_disabled(dev_priv, pipe);
6239 }
6240
6241 ironlake_pfit_enable(pipe_config);
6242
6243 /*
6244 * On ILK+ the LUT must be loaded before the pipe is running, but
6245 * with clocks enabled.
6246 */
6247 intel_color_load_luts(pipe_config);
6248 intel_color_commit(pipe_config);
6249 /* update DSPCNTR to configure gamma for pipe bottom color */
6250 intel_disable_primary_plane(pipe_config);
6251
6252 if (dev_priv->display.initial_watermarks != NULL)
6253 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6254 intel_enable_pipe(pipe_config);
6255
6256 if (pipe_config->has_pch_encoder)
6257 ironlake_pch_enable(old_intel_state, pipe_config);
6258
6259 assert_vblank_disabled(crtc);
6260 intel_crtc_vblank_on(pipe_config);
6261
6262 intel_encoders_enable(crtc, pipe_config, old_state);
6263
6264 if (HAS_PCH_CPT(dev_priv))
6265 cpt_verify_modeset(dev, intel_crtc->pipe);
6266
6267 /*
6268 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6269 * And a second vblank wait is needed at least on ILK with
6270 * some interlaced HDMI modes. Let's do the double wait always
6271 * in case there are more corner cases we don't know about.
6272 */
6273 if (pipe_config->has_pch_encoder) {
6274 intel_wait_for_vblank(dev_priv, pipe);
6275 intel_wait_for_vblank(dev_priv, pipe);
6276 }
6277 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6278 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6279 }
6280
6281 /* IPS only exists on ULT machines and is tied to pipe A. */
6282 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6283 {
6284 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6285 }
6286
6287 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6288 enum pipe pipe, bool apply)
6289 {
6290 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6291 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6292
6293 if (apply)
6294 val |= mask;
6295 else
6296 val &= ~mask;
6297
6298 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6299 }
6300
6301 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6302 {
6303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6304 enum pipe pipe = crtc->pipe;
6305 u32 val;
6306
6307 val = MBUS_DBOX_A_CREDIT(2);
6308 val |= MBUS_DBOX_BW_CREDIT(1);
6309 val |= MBUS_DBOX_B_CREDIT(8);
6310
6311 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6312 }
6313
6314 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6315 struct drm_atomic_state *old_state)
6316 {
6317 struct drm_crtc *crtc = pipe_config->base.crtc;
6318 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6320 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6321 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6322 struct intel_atomic_state *old_intel_state =
6323 to_intel_atomic_state(old_state);
6324 bool psl_clkgate_wa;
6325
6326 if (WARN_ON(intel_crtc->active))
6327 return;
6328
6329 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6330
6331 if (pipe_config->shared_dpll)
6332 intel_enable_shared_dpll(pipe_config);
6333
6334 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6335
6336 if (intel_crtc_has_dp_encoder(pipe_config))
6337 intel_dp_set_m_n(pipe_config, M1_N1);
6338
6339 if (!transcoder_is_dsi(cpu_transcoder))
6340 intel_set_pipe_timings(pipe_config);
6341
6342 intel_set_pipe_src_size(pipe_config);
6343
6344 if (cpu_transcoder != TRANSCODER_EDP &&
6345 !transcoder_is_dsi(cpu_transcoder)) {
6346 I915_WRITE(PIPE_MULT(cpu_transcoder),
6347 pipe_config->pixel_multiplier - 1);
6348 }
6349
6350 if (pipe_config->has_pch_encoder) {
6351 intel_cpu_transcoder_set_m_n(pipe_config,
6352 &pipe_config->fdi_m_n, NULL);
6353 }
6354
6355 if (!transcoder_is_dsi(cpu_transcoder))
6356 haswell_set_pipeconf(pipe_config);
6357
6358 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6359 bdw_set_pipemisc(pipe_config);
6360
6361 intel_crtc->active = true;
6362
6363 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6364 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6365 pipe_config->pch_pfit.enabled;
6366 if (psl_clkgate_wa)
6367 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6368
6369 if (INTEL_GEN(dev_priv) >= 9)
6370 skylake_pfit_enable(pipe_config);
6371 else
6372 ironlake_pfit_enable(pipe_config);
6373
6374 /*
6375 * On ILK+ the LUT must be loaded before the pipe is running, but
6376 * with clocks enabled.
6377 */
6378 intel_color_load_luts(pipe_config);
6379 intel_color_commit(pipe_config);
6380 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6381 if (INTEL_GEN(dev_priv) < 9)
6382 intel_disable_primary_plane(pipe_config);
6383
6384 if (INTEL_GEN(dev_priv) >= 11)
6385 icl_set_pipe_chicken(intel_crtc);
6386
6387 intel_ddi_set_pipe_settings(pipe_config);
6388 if (!transcoder_is_dsi(cpu_transcoder))
6389 intel_ddi_enable_transcoder_func(pipe_config);
6390
6391 if (dev_priv->display.initial_watermarks != NULL)
6392 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6393
6394 if (INTEL_GEN(dev_priv) >= 11)
6395 icl_pipe_mbus_enable(intel_crtc);
6396
6397 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6398 if (!transcoder_is_dsi(cpu_transcoder))
6399 intel_enable_pipe(pipe_config);
6400
6401 if (pipe_config->has_pch_encoder)
6402 lpt_pch_enable(old_intel_state, pipe_config);
6403
6404 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6405 intel_ddi_set_vc_payload_alloc(pipe_config, true);
6406
6407 assert_vblank_disabled(crtc);
6408 intel_crtc_vblank_on(pipe_config);
6409
6410 intel_encoders_enable(crtc, pipe_config, old_state);
6411
6412 if (psl_clkgate_wa) {
6413 intel_wait_for_vblank(dev_priv, pipe);
6414 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6415 }
6416
6417 /* If we change the relative order between pipe/planes enabling, we need
6418 * to change the workaround. */
6419 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6420 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6421 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6422 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6423 }
6424 }
6425
6426 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6427 {
6428 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6429 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6430 enum pipe pipe = crtc->pipe;
6431
6432 /* To avoid upsetting the power well on haswell, only disable the pfit if
6433 * it's in use. The hw state code will make sure we get this right. */
6434 if (old_crtc_state->pch_pfit.enabled) {
6435 I915_WRITE(PF_CTL(pipe), 0);
6436 I915_WRITE(PF_WIN_POS(pipe), 0);
6437 I915_WRITE(PF_WIN_SZ(pipe), 0);
6438 }
6439 }
6440
6441 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6442 struct drm_atomic_state *old_state)
6443 {
6444 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6445 struct drm_device *dev = crtc->dev;
6446 struct drm_i915_private *dev_priv = to_i915(dev);
6447 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6448 int pipe = intel_crtc->pipe;
6449
6450 /*
6451 * Sometimes spurious CPU pipe underruns happen when the
6452 * pipe is already disabled, but FDI RX/TX is still enabled.
6453 * Happens at least with VGA+HDMI cloning. Suppress them.
6454 */
6455 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6456 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6457
6458 intel_encoders_disable(crtc, old_crtc_state, old_state);
6459
6460 drm_crtc_vblank_off(crtc);
6461 assert_vblank_disabled(crtc);
6462
6463 intel_disable_pipe(old_crtc_state);
6464
6465 ironlake_pfit_disable(old_crtc_state);
6466
6467 if (old_crtc_state->has_pch_encoder)
6468 ironlake_fdi_disable(crtc);
6469
6470 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6471
6472 if (old_crtc_state->has_pch_encoder) {
6473 ironlake_disable_pch_transcoder(dev_priv, pipe);
6474
6475 if (HAS_PCH_CPT(dev_priv)) {
6476 i915_reg_t reg;
6477 u32 temp;
6478
6479 /* disable TRANS_DP_CTL */
6480 reg = TRANS_DP_CTL(pipe);
6481 temp = I915_READ(reg);
6482 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6483 TRANS_DP_PORT_SEL_MASK);
6484 temp |= TRANS_DP_PORT_SEL_NONE;
6485 I915_WRITE(reg, temp);
6486
6487 /* disable DPLL_SEL */
6488 temp = I915_READ(PCH_DPLL_SEL);
6489 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6490 I915_WRITE(PCH_DPLL_SEL, temp);
6491 }
6492
6493 ironlake_fdi_pll_disable(intel_crtc);
6494 }
6495
6496 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6497 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6498 }
6499
6500 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6501 struct drm_atomic_state *old_state)
6502 {
6503 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6504 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6505 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6506 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6507
6508 intel_encoders_disable(crtc, old_crtc_state, old_state);
6509
6510 drm_crtc_vblank_off(crtc);
6511 assert_vblank_disabled(crtc);
6512
6513 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6514 if (!transcoder_is_dsi(cpu_transcoder))
6515 intel_disable_pipe(old_crtc_state);
6516
6517 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6518 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6519
6520 if (!transcoder_is_dsi(cpu_transcoder))
6521 intel_ddi_disable_transcoder_func(old_crtc_state);
6522
6523 intel_dsc_disable(old_crtc_state);
6524
6525 if (INTEL_GEN(dev_priv) >= 9)
6526 skylake_scaler_disable(intel_crtc);
6527 else
6528 ironlake_pfit_disable(old_crtc_state);
6529
6530 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6531
6532 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6533 }
6534
6535 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6536 {
6537 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6538 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6539
6540 if (!crtc_state->gmch_pfit.control)
6541 return;
6542
6543 /*
6544 * The panel fitter should only be adjusted whilst the pipe is disabled,
6545 * according to the register description and the PRM.
6546 */
6547 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6548 assert_pipe_disabled(dev_priv, crtc->pipe);
6549
6550 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6551 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6552
6553 /* Border color in case we don't scale up to the full screen. Black by
6554 * default, change to something else for debugging. */
6555 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6556 }
6557
6558 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6559 {
6560 if (port == PORT_NONE)
6561 return false;
6562
6563 if (IS_ELKHARTLAKE(dev_priv))
6564 return port <= PORT_C;
6565
6566 if (INTEL_GEN(dev_priv) >= 11)
6567 return port <= PORT_B;
6568
6569 return false;
6570 }
6571
6572 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6573 {
6574 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6575 return port >= PORT_C && port <= PORT_F;
6576
6577 return false;
6578 }
6579
6580 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6581 {
6582 if (!intel_port_is_tc(dev_priv, port))
6583 return PORT_TC_NONE;
6584
6585 return port - PORT_C;
6586 }
6587
6588 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6589 {
6590 switch (port) {
6591 case PORT_A:
6592 return POWER_DOMAIN_PORT_DDI_A_LANES;
6593 case PORT_B:
6594 return POWER_DOMAIN_PORT_DDI_B_LANES;
6595 case PORT_C:
6596 return POWER_DOMAIN_PORT_DDI_C_LANES;
6597 case PORT_D:
6598 return POWER_DOMAIN_PORT_DDI_D_LANES;
6599 case PORT_E:
6600 return POWER_DOMAIN_PORT_DDI_E_LANES;
6601 case PORT_F:
6602 return POWER_DOMAIN_PORT_DDI_F_LANES;
6603 default:
6604 MISSING_CASE(port);
6605 return POWER_DOMAIN_PORT_OTHER;
6606 }
6607 }
6608
6609 enum intel_display_power_domain
6610 intel_aux_power_domain(struct intel_digital_port *dig_port)
6611 {
6612 switch (dig_port->aux_ch) {
6613 case AUX_CH_A:
6614 return POWER_DOMAIN_AUX_A;
6615 case AUX_CH_B:
6616 return POWER_DOMAIN_AUX_B;
6617 case AUX_CH_C:
6618 return POWER_DOMAIN_AUX_C;
6619 case AUX_CH_D:
6620 return POWER_DOMAIN_AUX_D;
6621 case AUX_CH_E:
6622 return POWER_DOMAIN_AUX_E;
6623 case AUX_CH_F:
6624 return POWER_DOMAIN_AUX_F;
6625 default:
6626 MISSING_CASE(dig_port->aux_ch);
6627 return POWER_DOMAIN_AUX_A;
6628 }
6629 }
6630
6631 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6632 struct intel_crtc_state *crtc_state)
6633 {
6634 struct drm_device *dev = crtc->dev;
6635 struct drm_i915_private *dev_priv = to_i915(dev);
6636 struct drm_encoder *encoder;
6637 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6638 enum pipe pipe = intel_crtc->pipe;
6639 u64 mask;
6640 enum transcoder transcoder = crtc_state->cpu_transcoder;
6641
6642 if (!crtc_state->base.active)
6643 return 0;
6644
6645 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6646 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6647 if (crtc_state->pch_pfit.enabled ||
6648 crtc_state->pch_pfit.force_thru)
6649 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6650
6651 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6652 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6653
6654 mask |= BIT_ULL(intel_encoder->power_domain);
6655 }
6656
6657 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6658 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6659
6660 if (crtc_state->shared_dpll)
6661 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6662
6663 return mask;
6664 }
6665
6666 static u64
6667 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6668 struct intel_crtc_state *crtc_state)
6669 {
6670 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6671 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6672 enum intel_display_power_domain domain;
6673 u64 domains, new_domains, old_domains;
6674
6675 old_domains = intel_crtc->enabled_power_domains;
6676 intel_crtc->enabled_power_domains = new_domains =
6677 get_crtc_power_domains(crtc, crtc_state);
6678
6679 domains = new_domains & ~old_domains;
6680
6681 for_each_power_domain(domain, domains)
6682 intel_display_power_get(dev_priv, domain);
6683
6684 return old_domains & ~new_domains;
6685 }
6686
6687 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6688 u64 domains)
6689 {
6690 enum intel_display_power_domain domain;
6691
6692 for_each_power_domain(domain, domains)
6693 intel_display_power_put_unchecked(dev_priv, domain);
6694 }
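/*
 * Editorial sketch (not part of the driver): the refcount diff pattern
 * used by modeset_get_crtc_power_domains() above. Domains that are newly
 * needed are acquired first, and the stale set is returned so the caller
 * can release it after the modeset; domains present in both masks stay
 * referenced throughout the transition.
 */
#include <stdint.h>

static uint64_t ex_domains_to_get(uint64_t old_mask, uint64_t new_mask)
{
	return new_mask & ~old_mask;	/* acquire before the modeset */
}

static uint64_t ex_domains_to_put(uint64_t old_mask, uint64_t new_mask)
{
	return old_mask & ~new_mask;	/* release after the modeset */
}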
6695
6696 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6697 struct drm_atomic_state *old_state)
6698 {
6699 struct intel_atomic_state *old_intel_state =
6700 to_intel_atomic_state(old_state);
6701 struct drm_crtc *crtc = pipe_config->base.crtc;
6702 struct drm_device *dev = crtc->dev;
6703 struct drm_i915_private *dev_priv = to_i915(dev);
6704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6705 int pipe = intel_crtc->pipe;
6706
6707 if (WARN_ON(intel_crtc->active))
6708 return;
6709
6710 if (intel_crtc_has_dp_encoder(pipe_config))
6711 intel_dp_set_m_n(pipe_config, M1_N1);
6712
6713 intel_set_pipe_timings(pipe_config);
6714 intel_set_pipe_src_size(pipe_config);
6715
6716 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6717 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6718 I915_WRITE(CHV_CANVAS(pipe), 0);
6719 }
6720
6721 i9xx_set_pipeconf(pipe_config);
6722
6723 intel_crtc->active = true;
6724
6725 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6726
6727 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6728
6729 if (IS_CHERRYVIEW(dev_priv)) {
6730 chv_prepare_pll(intel_crtc, pipe_config);
6731 chv_enable_pll(intel_crtc, pipe_config);
6732 } else {
6733 vlv_prepare_pll(intel_crtc, pipe_config);
6734 vlv_enable_pll(intel_crtc, pipe_config);
6735 }
6736
6737 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6738
6739 i9xx_pfit_enable(pipe_config);
6740
6741 intel_color_load_luts(pipe_config);
6742 intel_color_commit(pipe_config);
6743 /* update DSPCNTR to configure gamma for pipe bottom color */
6744 intel_disable_primary_plane(pipe_config);
6745
6746 dev_priv->display.initial_watermarks(old_intel_state,
6747 pipe_config);
6748 intel_enable_pipe(pipe_config);
6749
6750 assert_vblank_disabled(crtc);
6751 intel_crtc_vblank_on(pipe_config);
6752
6753 intel_encoders_enable(crtc, pipe_config, old_state);
6754 }
6755
6756 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6757 {
6758 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6759 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6760
6761 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6762 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6763 }
6764
6765 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6766 struct drm_atomic_state *old_state)
6767 {
6768 struct intel_atomic_state *old_intel_state =
6769 to_intel_atomic_state(old_state);
6770 struct drm_crtc *crtc = pipe_config->base.crtc;
6771 struct drm_device *dev = crtc->dev;
6772 struct drm_i915_private *dev_priv = to_i915(dev);
6773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6774 enum pipe pipe = intel_crtc->pipe;
6775
6776 if (WARN_ON(intel_crtc->active))
6777 return;
6778
6779 i9xx_set_pll_dividers(pipe_config);
6780
6781 if (intel_crtc_has_dp_encoder(pipe_config))
6782 intel_dp_set_m_n(pipe_config, M1_N1);
6783
6784 intel_set_pipe_timings(pipe_config);
6785 intel_set_pipe_src_size(pipe_config);
6786
6787 i9xx_set_pipeconf(pipe_config);
6788
6789 intel_crtc->active = true;
6790
6791 if (!IS_GEN(dev_priv, 2))
6792 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6793
6794 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6795
6796 i9xx_enable_pll(intel_crtc, pipe_config);
6797
6798 i9xx_pfit_enable(pipe_config);
6799
6800 intel_color_load_luts(pipe_config);
6801 intel_color_commit(pipe_config);
6802 /* update DSPCNTR to configure gamma for pipe bottom color */
6803 intel_disable_primary_plane(pipe_config);
6804
6805 if (dev_priv->display.initial_watermarks != NULL)
6806 dev_priv->display.initial_watermarks(old_intel_state,
6807 pipe_config);
6808 else
6809 intel_update_watermarks(intel_crtc);
6810 intel_enable_pipe(pipe_config);
6811
6812 assert_vblank_disabled(crtc);
6813 intel_crtc_vblank_on(pipe_config);
6814
6815 intel_encoders_enable(crtc, pipe_config, old_state);
6816 }
6817
6818 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6819 {
6820 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6822
6823 if (!old_crtc_state->gmch_pfit.control)
6824 return;
6825
6826 assert_pipe_disabled(dev_priv, crtc->pipe);
6827
6828 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6829 I915_READ(PFIT_CONTROL));
6830 I915_WRITE(PFIT_CONTROL, 0);
6831 }
6832
6833 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6834 struct drm_atomic_state *old_state)
6835 {
6836 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6837 struct drm_device *dev = crtc->dev;
6838 struct drm_i915_private *dev_priv = to_i915(dev);
6839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6840 int pipe = intel_crtc->pipe;
6841
6842 /*
6843 * On gen2 planes are double buffered but the pipe isn't, so we must
6844 * wait for planes to fully turn off before disabling the pipe.
6845 */
6846 if (IS_GEN(dev_priv, 2))
6847 intel_wait_for_vblank(dev_priv, pipe);
6848
6849 intel_encoders_disable(crtc, old_crtc_state, old_state);
6850
6851 drm_crtc_vblank_off(crtc);
6852 assert_vblank_disabled(crtc);
6853
6854 intel_disable_pipe(old_crtc_state);
6855
6856 i9xx_pfit_disable(old_crtc_state);
6857
6858 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6859
6860 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6861 if (IS_CHERRYVIEW(dev_priv))
6862 chv_disable_pll(dev_priv, pipe);
6863 else if (IS_VALLEYVIEW(dev_priv))
6864 vlv_disable_pll(dev_priv, pipe);
6865 else
6866 i9xx_disable_pll(old_crtc_state);
6867 }
6868
6869 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6870
6871 if (!IS_GEN(dev_priv, 2))
6872 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6873
6874 if (!dev_priv->display.initial_watermarks)
6875 intel_update_watermarks(intel_crtc);
6876
6877 /* clock the pipe down to 640x480@60 to potentially save power */
6878 if (IS_I830(dev_priv))
6879 i830_enable_pipe(dev_priv, pipe);
6880 }
6881
6882 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6883 struct drm_modeset_acquire_ctx *ctx)
6884 {
6885 struct intel_encoder *encoder;
6886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6887 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6888 struct intel_bw_state *bw_state =
6889 to_intel_bw_state(dev_priv->bw_obj.state);
6890 enum intel_display_power_domain domain;
6891 struct intel_plane *plane;
6892 u64 domains;
6893 struct drm_atomic_state *state;
6894 struct intel_crtc_state *crtc_state;
6895 int ret;
6896
6897 if (!intel_crtc->active)
6898 return;
6899
6900 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6901 const struct intel_plane_state *plane_state =
6902 to_intel_plane_state(plane->base.state);
6903
6904 if (plane_state->base.visible)
6905 intel_plane_disable_noatomic(intel_crtc, plane);
6906 }
6907
6908 state = drm_atomic_state_alloc(crtc->dev);
6909 if (!state) {
6910 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6911 crtc->base.id, crtc->name);
6912 return;
6913 }
6914
6915 state->acquire_ctx = ctx;
6916
6917 /* Everything's already locked, -EDEADLK can't happen. */
6918 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6919 ret = drm_atomic_add_affected_connectors(state, crtc);
6920
6921 WARN_ON(IS_ERR(crtc_state) || ret);
6922
6923 dev_priv->display.crtc_disable(crtc_state, state);
6924
6925 drm_atomic_state_put(state);
6926
6927 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6928 crtc->base.id, crtc->name);
6929
6930 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6931 crtc->state->active = false;
6932 intel_crtc->active = false;
6933 crtc->enabled = false;
6934 crtc->state->connector_mask = 0;
6935 crtc->state->encoder_mask = 0;
6936
6937 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6938 encoder->base.crtc = NULL;
6939
6940 intel_fbc_disable(intel_crtc);
6941 intel_update_watermarks(intel_crtc);
6942 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6943
6944 domains = intel_crtc->enabled_power_domains;
6945 for_each_power_domain(domain, domains)
6946 intel_display_power_put_unchecked(dev_priv, domain);
6947 intel_crtc->enabled_power_domains = 0;
6948
6949 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6950 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6951 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6952
6953 bw_state->data_rate[intel_crtc->pipe] = 0;
6954 bw_state->num_active_planes[intel_crtc->pipe] = 0;
6955 }
6956
6957 /*
6958 * Turn all CRTCs off, but do not adjust state.
6959 * This has to be paired with a call to intel_modeset_setup_hw_state.
6960 */
6961 int intel_display_suspend(struct drm_device *dev)
6962 {
6963 struct drm_i915_private *dev_priv = to_i915(dev);
6964 struct drm_atomic_state *state;
6965 int ret;
6966
6967 state = drm_atomic_helper_suspend(dev);
6968 ret = PTR_ERR_OR_ZERO(state);
6969 if (ret)
6970 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6971 else
6972 dev_priv->modeset_restore_state = state;
6973 return ret;
6974 }
6975
6976 void intel_encoder_destroy(struct drm_encoder *encoder)
6977 {
6978 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6979
6980 drm_encoder_cleanup(encoder);
6981 kfree(intel_encoder);
6982 }
6983
6984 /* Cross check the actual hw state with our own modeset state tracking (and its
6985 * internal consistency). */
6986 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6987 struct drm_connector_state *conn_state)
6988 {
6989 struct intel_connector *connector = to_intel_connector(conn_state->connector);
6990
6991 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6992 connector->base.base.id,
6993 connector->base.name);
6994
6995 if (connector->get_hw_state(connector)) {
6996 struct intel_encoder *encoder = connector->encoder;
6997
6998 I915_STATE_WARN(!crtc_state,
6999 "connector enabled without attached crtc\n");
7000
7001 if (!crtc_state)
7002 return;
7003
7004 I915_STATE_WARN(!crtc_state->active,
7005 "connector is active, but attached crtc isn't\n");
7006
7007 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7008 return;
7009
7010 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7011 "atomic encoder doesn't match attached encoder\n");
7012
7013 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7014 "attached encoder crtc differs from connector crtc\n");
7015 } else {
7016 I915_STATE_WARN(crtc_state && crtc_state->active,
7017 "attached crtc is active, but connector isn't\n");
7018 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7019 "best encoder set without crtc!\n");
7020 }
7021 }
7022
7023 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7024 {
7025 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7026 return crtc_state->fdi_lanes;
7027
7028 return 0;
7029 }
7030
7031 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7032 struct intel_crtc_state *pipe_config)
7033 {
7034 struct drm_i915_private *dev_priv = to_i915(dev);
7035 struct drm_atomic_state *state = pipe_config->base.state;
7036 struct intel_crtc *other_crtc;
7037 struct intel_crtc_state *other_crtc_state;
7038
7039 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7040 pipe_name(pipe), pipe_config->fdi_lanes);
7041 if (pipe_config->fdi_lanes > 4) {
7042 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7043 pipe_name(pipe), pipe_config->fdi_lanes);
7044 return -EINVAL;
7045 }
7046
7047 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7048 if (pipe_config->fdi_lanes > 2) {
7049 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7050 pipe_config->fdi_lanes);
7051 return -EINVAL;
7052 } else {
7053 return 0;
7054 }
7055 }
7056
7057 if (INTEL_INFO(dev_priv)->num_pipes == 2)
7058 return 0;
7059
7060 /* Ivybridge 3 pipe is really complicated */
7061 switch (pipe) {
7062 case PIPE_A:
7063 return 0;
7064 case PIPE_B:
7065 if (pipe_config->fdi_lanes <= 2)
7066 return 0;
7067
7068 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7069 other_crtc_state =
7070 intel_atomic_get_crtc_state(state, other_crtc);
7071 if (IS_ERR(other_crtc_state))
7072 return PTR_ERR(other_crtc_state);
7073
7074 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7075 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7076 pipe_name(pipe), pipe_config->fdi_lanes);
7077 return -EINVAL;
7078 }
7079 return 0;
7080 case PIPE_C:
7081 if (pipe_config->fdi_lanes > 2) {
7082 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7083 pipe_name(pipe), pipe_config->fdi_lanes);
7084 return -EINVAL;
7085 }
7086
7087 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7088 other_crtc_state =
7089 intel_atomic_get_crtc_state(state, other_crtc);
7090 if (IS_ERR(other_crtc_state))
7091 return PTR_ERR(other_crtc_state);
7092
7093 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7094 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
7095 return -EINVAL;
7096 }
7097 return 0;
7098 default:
7099 BUG();
7100 }
7101 }
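/*
 * Editorial sketch (not part of the driver): the IVB three-pipe FDI
 * sharing rules checked above, as a pure predicate. Pipe A has dedicated
 * lanes; pipes B and C share, so B may exceed 2 lanes only while C is
 * idle, and C is capped at 2 lanes and only fits if B uses at most 2.
 */
#include <stdbool.h>

static bool ex_ivb_fdi_lanes_ok(char pipe, int lanes, int other_pipe_lanes)
{
	if (lanes > 4)
		return false;

	switch (pipe) {
	case 'A':
		return true;
	case 'B':	/* other_pipe_lanes = lanes required by pipe C */
		return lanes <= 2 || other_pipe_lanes == 0;
	case 'C':	/* other_pipe_lanes = lanes required by pipe B */
		return lanes <= 2 && other_pipe_lanes <= 2;
	default:
		return false;
	}
}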
7102
7103 #define RETRY 1
7104 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7105 struct intel_crtc_state *pipe_config)
7106 {
7107 struct drm_device *dev = intel_crtc->base.dev;
7108 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7109 int lane, link_bw, fdi_dotclock, ret;
7110 bool needs_recompute = false;
7111
7112 retry:
7113 /* FDI is a binary signal running at ~2.7GHz, encoding
7114 * each output octet as 10 bits. The actual frequency
7115 * is stored as a divider into a 100MHz clock, and the
7116 * mode pixel clock is stored in units of 1KHz.
7117 * Hence the bw of each lane in terms of the mode signal
7118 * is:
7119 */
7120 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7121
7122 fdi_dotclock = adjusted_mode->crtc_clock;
7123
7124 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7125 pipe_config->pipe_bpp);
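/*
 * Roughly lanes = DIV_ROUND_UP(dotclock * bpp, link_bw * 8); the
 * helper may add a small extra margin (e.g. for spread spectrum),
 * so treat this as a sketch of the math, not the exact formula.
 */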
7126
7127 pipe_config->fdi_lanes = lane;
7128
7129 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7130 link_bw, &pipe_config->fdi_m_n, false);
7131
7132 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7133 if (ret == -EDEADLK)
7134 return ret;
7135
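/*
 * FDI link out of bandwidth: drop 2 bits per RGB component
 * (down to a floor of 6 bpc, i.e. 18 bpp) and retry.
 */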
7136 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7137 pipe_config->pipe_bpp -= 2*3;
7138 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7139 pipe_config->pipe_bpp);
7140 needs_recompute = true;
7141 pipe_config->bw_constrained = true;
7142
7143 goto retry;
7144 }
7145
7146 if (needs_recompute)
7147 return RETRY;
7148
7149 return ret;
7150 }
7151
7152 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7153 {
7154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7156
7157 /* IPS only exists on ULT machines and is tied to pipe A. */
7158 if (!hsw_crtc_supports_ips(crtc))
7159 return false;
7160
7161 if (!i915_modparams.enable_ips)
7162 return false;
7163
7164 if (crtc_state->pipe_bpp > 24)
7165 return false;
7166
7167 /*
7168 * We compare against max which means we must take
7169 * the increased cdclk requirement into account when
7170 * calculating the new cdclk.
7171 *
7172 * Should measure whether using a lower cdclk w/o IPS would be preferable.
7173 */
7174 if (IS_BROADWELL(dev_priv) &&
7175 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7176 return false;
7177
7178 return true;
7179 }
7180
7181 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7182 {
7183 struct drm_i915_private *dev_priv =
7184 to_i915(crtc_state->base.crtc->dev);
7185 struct intel_atomic_state *intel_state =
7186 to_intel_atomic_state(crtc_state->base.state);
7187
7188 if (!hsw_crtc_state_ips_capable(crtc_state))
7189 return false;
7190
7191 /*
7192 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7193 * enabled and disabled dynamically based on package C states,
7194 * user space can't make reliable use of the CRCs, so let's just
7195 * completely disable it.
7196 */
7197 if (crtc_state->crc_enabled)
7198 return false;
7199
7200 /* IPS should be fine as long as at least one plane is enabled. */
7201 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7202 return false;
7203
7204 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7205 if (IS_BROADWELL(dev_priv) &&
7206 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7207 return false;
7208
7209 return true;
7210 }
7211
7212 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7213 {
7214 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7215
7216 /* GDG (i915G) supports double wide on either pipe, otherwise pipe A only */
7217 return INTEL_GEN(dev_priv) < 4 &&
7218 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7219 }
7220
7221 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7222 {
7223 u32 pixel_rate;
7224
7225 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7226
7227 /*
7228 * We only use IF-ID interlacing. If we ever use
7229 * PF-ID we'll need to adjust the pixel_rate here.
7230 */
7231
7232 if (pipe_config->pch_pfit.enabled) {
7233 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7234 u32 pfit_size = pipe_config->pch_pfit.size;
7235
7236 pipe_w = pipe_config->pipe_src_w;
7237 pipe_h = pipe_config->pipe_src_h;
7238
7239 pfit_w = (pfit_size >> 16) & 0xFFFF;
7240 pfit_h = pfit_size & 0xFFFF;
7241 if (pipe_w < pfit_w)
7242 pipe_w = pfit_w;
7243 if (pipe_h < pfit_h)
7244 pipe_h = pfit_h;
7245
7246 if (WARN_ON(!pfit_w || !pfit_h))
7247 return pixel_rate;
7248
7249 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7250 pfit_w * pfit_h);
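/*
 * Hypothetical example: pfit downscaling a 1920x1080 source into a
 * 1280x720 window scales the rate by 2073600/921600 = 2.25x.
 */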
7251 }
7252
7253 return pixel_rate;
7254 }
7255
7256 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7257 {
7258 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7259
7260 if (HAS_GMCH(dev_priv))
7261 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7262 crtc_state->pixel_rate =
7263 crtc_state->base.adjusted_mode.crtc_clock;
7264 else
7265 crtc_state->pixel_rate =
7266 ilk_pipe_pixel_rate(crtc_state);
7267 }
7268
7269 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7270 struct intel_crtc_state *pipe_config)
7271 {
7272 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7273 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7274 int clock_limit = dev_priv->max_dotclk_freq;
7275
7276 if (INTEL_GEN(dev_priv) < 4) {
7277 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7278
7279 /*
7280 * Enable double wide mode when the dot clock
7281 * is > 90% of the (display) core speed.
7282 */
7283 if (intel_crtc_supports_double_wide(crtc) &&
7284 adjusted_mode->crtc_clock > clock_limit) {
7285 clock_limit = dev_priv->max_dotclk_freq;
7286 pipe_config->double_wide = true;
7287 }
7288 }
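/*
 * Hypothetical example: with a 320000 kHz max cdclk, single-wide
 * modes are capped at 9/10 of that, i.e. 288000 kHz; a faster
 * dotclock needs double wide mode (capped at max_dotclk instead).
 */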
7289
7290 if (adjusted_mode->crtc_clock > clock_limit) {
7291 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7292 adjusted_mode->crtc_clock, clock_limit,
7293 yesno(pipe_config->double_wide));
7294 return -EINVAL;
7295 }
7296
7297 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7298 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7299 pipe_config->base.ctm) {
7300 /*
7301 * There is only one pipe CSC unit per pipe, and we need that
7302 * for output conversion from RGB->YCBCR. So if CTM is already
7303 * applied we can't support YCBCR420 output.
7304 */
7305 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7306 return -EINVAL;
7307 }
7308
7309 /*
7310 * Pipe horizontal size must be even in:
7311 * - DVO ganged mode
7312 * - LVDS dual channel mode
7313 * - Double wide pipe
7314 */
7315 if (pipe_config->pipe_src_w & 1) {
7316 if (pipe_config->double_wide) {
7317 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7318 return -EINVAL;
7319 }
7320
7321 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7322 intel_is_dual_link_lvds(dev_priv)) {
7323 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7324 return -EINVAL;
7325 }
7326 }
7327
7328 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7329 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7330 */
7331 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7332 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7333 return -EINVAL;
7334
7335 intel_crtc_compute_pixel_rate(pipe_config);
7336
7337 if (pipe_config->has_pch_encoder)
7338 return ironlake_fdi_compute_config(crtc, pipe_config);
7339
7340 return 0;
7341 }
7342
7343 static void
7344 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7345 {
7346 while (*num > DATA_LINK_M_N_MASK ||
7347 *den > DATA_LINK_M_N_MASK) {
7348 *num >>= 1;
7349 *den >>= 1;
7350 }
7351 }
7352
7353 static void compute_m_n(unsigned int m, unsigned int n,
7354 u32 *ret_m, u32 *ret_n,
7355 bool constant_n)
7356 {
7357 /*
7358 * Several DP dongles in particular seem to be fussy about
7359 * too large link M/N values. Use an N value of 0x8000, which
7360 * these devices should accept. 0x8000 is the specified fixed N
7361 * value for asynchronous clock mode, which the devices also
7362 * expect in synchronous clock mode.
7363 */
7364 if (constant_n)
7365 *ret_n = 0x8000;
7366 else
7367 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7368
7369 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7370 intel_reduce_m_n_ratio(ret_m, ret_n);
7371 }
7372
7373 void
7374 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7375 int pixel_clock, int link_clock,
7376 struct intel_link_m_n *m_n,
7377 bool constant_n)
7378 {
7379 m_n->tu = 64;
7380
7381 compute_m_n(bits_per_pixel * pixel_clock,
7382 link_clock * nlanes * 8,
7383 &m_n->gmch_m, &m_n->gmch_n,
7384 constant_n);
7385
7386 compute_m_n(pixel_clock, link_clock,
7387 &m_n->link_m, &m_n->link_n,
7388 constant_n);
7389 }
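/*
 * Worked example (hypothetical numbers): a 148500 kHz pixel clock at
 * 24 bpp over 4 lanes of a 270000 kHz link gives a data ratio of
 * 24 * 148500 / (270000 * 4 * 8) = 0.4125, so with constant_n the
 * gmch M/N pair becomes 13516/0x8000; the link M/N ratio is simply
 * 148500/270000.
 */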
7390
7391 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7392 {
7393 if (i915_modparams.panel_use_ssc >= 0)
7394 return i915_modparams.panel_use_ssc != 0;
7395 return dev_priv->vbt.lvds_use_ssc
7396 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7397 }
7398
7399 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7400 {
7401 return (1 << dpll->n) << 16 | dpll->m2;
7402 }
7403
7404 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7405 {
7406 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7407 }
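/*
 * FP divisor register layout, as assembled above: N in bits 16 and up,
 * M1 in bits 15:8, M2 in bits 7:0. Pineview has no M1 divisor and
 * stores N as a power-of-two (1 << n) field instead.
 */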
7408
7409 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7410 struct intel_crtc_state *crtc_state,
7411 struct dpll *reduced_clock)
7412 {
7413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7414 u32 fp, fp2 = 0;
7415
7416 if (IS_PINEVIEW(dev_priv)) {
7417 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7418 if (reduced_clock)
7419 fp2 = pnv_dpll_compute_fp(reduced_clock);
7420 } else {
7421 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7422 if (reduced_clock)
7423 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7424 }
7425
7426 crtc_state->dpll_hw_state.fp0 = fp;
7427
7428 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7429 reduced_clock) {
7430 crtc_state->dpll_hw_state.fp1 = fp2;
7431 } else {
7432 crtc_state->dpll_hw_state.fp1 = fp;
7433 }
7434 }
7435
7436 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7437 pipe)
7438 {
7439 u32 reg_val;
7440
7441 /*
7442 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7443 * and set it to a reasonable value instead.
7444 */
7445 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7446 reg_val &= 0xffffff00;
7447 reg_val |= 0x00000030;
7448 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7449
7450 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7451 reg_val &= 0x00ffffff;
7452 reg_val |= 0x8c000000;
7453 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7454
7455 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7456 reg_val &= 0xffffff00;
7457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7458
7459 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7460 reg_val &= 0x00ffffff;
7461 reg_val |= 0xb0000000;
7462 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7463 }
7464
7465 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7466 const struct intel_link_m_n *m_n)
7467 {
7468 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7469 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7470 enum pipe pipe = crtc->pipe;
7471
7472 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7473 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7474 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7475 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7476 }
7477
7478 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7479 enum transcoder transcoder)
7480 {
7481 if (IS_HASWELL(dev_priv))
7482 return transcoder == TRANSCODER_EDP;
7483
7484 /*
7485 * Strictly speaking some registers are available before
7486 * gen7, but we only support DRRS on gen7+
7487 */
7488 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7489 }
7490
7491 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7492 const struct intel_link_m_n *m_n,
7493 const struct intel_link_m_n *m2_n2)
7494 {
7495 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7496 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7497 enum pipe pipe = crtc->pipe;
7498 enum transcoder transcoder = crtc_state->cpu_transcoder;
7499
7500 if (INTEL_GEN(dev_priv) >= 5) {
7501 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7502 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7503 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7504 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7505 /*
7506 * M2_N2 registers are set only if DRRS is supported
7507 * (to make sure the registers are not unnecessarily accessed).
7508 */
7509 if (m2_n2 && crtc_state->has_drrs &&
7510 transcoder_has_m2_n2(dev_priv, transcoder)) {
7511 I915_WRITE(PIPE_DATA_M2(transcoder),
7512 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7513 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7514 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7515 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7516 }
7517 } else {
7518 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7519 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7520 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7521 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7522 }
7523 }
7524
7525 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7526 {
7527 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7528
7529 if (m_n == M1_N1) {
7530 dp_m_n = &crtc_state->dp_m_n;
7531 dp_m2_n2 = &crtc_state->dp_m2_n2;
7532 } else if (m_n == M2_N2) {
7534 /*
7535 * M2_N2 registers are not supported. Hence m2_n2 divider value
7536 * needs to be programmed into M1_N1.
7537 */
7538 dp_m_n = &crtc_state->dp_m2_n2;
7539 } else {
7540 DRM_ERROR("Unsupported divider value\n");
7541 return;
7542 }
7543
7544 if (crtc_state->has_pch_encoder)
7545 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7546 else
7547 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7548 }
7549
7550 static void vlv_compute_dpll(struct intel_crtc *crtc,
7551 struct intel_crtc_state *pipe_config)
7552 {
7553 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7554 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7555 if (crtc->pipe != PIPE_A)
7556 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7557
7558 /* DPLL not used with DSI, but still need the rest set up */
7559 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7560 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7561 DPLL_EXT_BUFFER_ENABLE_VLV;
7562
7563 pipe_config->dpll_hw_state.dpll_md =
7564 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7565 }
7566
7567 static void chv_compute_dpll(struct intel_crtc *crtc,
7568 struct intel_crtc_state *pipe_config)
7569 {
7570 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7571 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7572 if (crtc->pipe != PIPE_A)
7573 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7574
7575 /* DPLL not used with DSI, but still need the rest set up */
7576 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7577 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7578
7579 pipe_config->dpll_hw_state.dpll_md =
7580 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7581 }
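/*
 * Note: on VLV/CHV the compute functions above only fill in the DPLL
 * enable/reference bits; the actual dividers are programmed through
 * the DPIO sideband interface in the prepare functions below.
 */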
7582
7583 static void vlv_prepare_pll(struct intel_crtc *crtc,
7584 const struct intel_crtc_state *pipe_config)
7585 {
7586 struct drm_device *dev = crtc->base.dev;
7587 struct drm_i915_private *dev_priv = to_i915(dev);
7588 enum pipe pipe = crtc->pipe;
7589 u32 mdiv;
7590 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7591 u32 coreclk, reg_val;
7592
7593 /* Enable Refclk */
7594 I915_WRITE(DPLL(pipe),
7595 pipe_config->dpll_hw_state.dpll &
7596 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7597
7598 /* No need to actually set up the DPLL with DSI */
7599 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7600 return;
7601
7602 vlv_dpio_get(dev_priv);
7603
7604 bestn = pipe_config->dpll.n;
7605 bestm1 = pipe_config->dpll.m1;
7606 bestm2 = pipe_config->dpll.m2;
7607 bestp1 = pipe_config->dpll.p1;
7608 bestp2 = pipe_config->dpll.p2;
7609
7610 /* See eDP HDMI DPIO driver vbios notes doc */
7611
7612 /* PLL B needs special handling */
7613 if (pipe == PIPE_B)
7614 vlv_pllb_recal_opamp(dev_priv, pipe);
7615
7616 /* Set up Tx target for periodic Rcomp update */
7617 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7618
7619 /* Disable target IRef on PLL */
7620 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7621 reg_val &= 0x00ffffff;
7622 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7623
7624 /* Disable fast lock */
7625 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7626
7627 /* Set idtafcrecal before PLL is enabled */
7628 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7629 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7630 mdiv |= ((bestn << DPIO_N_SHIFT));
7631 mdiv |= (1 << DPIO_K_SHIFT);
7632
7633 /*
7634 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7635 * but we don't support that).
7636 * Note: don't use the DAC post divider as it seems unstable.
7637 */
7638 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7639 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7640
7641 mdiv |= DPIO_ENABLE_CALIBRATION;
7642 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7643
7644 /* Set HBR and RBR LPF coefficients */
7645 if (pipe_config->port_clock == 162000 ||
7646 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7647 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7648 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7649 0x009f0003);
7650 else
7651 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7652 0x00d0000f);
7653
7654 if (intel_crtc_has_dp_encoder(pipe_config)) {
7655 /* Use SSC source */
7656 if (pipe == PIPE_A)
7657 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7658 0x0df40000);
7659 else
7660 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7661 0x0df70000);
7662 } else { /* HDMI or VGA */
7663 /* Use bend source */
7664 if (pipe == PIPE_A)
7665 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7666 0x0df70000);
7667 else
7668 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7669 0x0df40000);
7670 }
7671
7672 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7673 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7674 if (intel_crtc_has_dp_encoder(pipe_config))
7675 coreclk |= 0x01000000;
7676 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7677
7678 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7679
7680 vlv_dpio_put(dev_priv);
7681 }
7682
7683 static void chv_prepare_pll(struct intel_crtc *crtc,
7684 const struct intel_crtc_state *pipe_config)
7685 {
7686 struct drm_device *dev = crtc->base.dev;
7687 struct drm_i915_private *dev_priv = to_i915(dev);
7688 enum pipe pipe = crtc->pipe;
7689 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7690 u32 loopfilter, tribuf_calcntr;
7691 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7692 u32 dpio_val;
7693 int vco;
7694
7695 /* Enable Refclk and SSC */
7696 I915_WRITE(DPLL(pipe),
7697 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7698
7699 /* No need to actually set up the DPLL with DSI */
7700 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7701 return;
7702
7703 bestn = pipe_config->dpll.n;
7704 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7705 bestm1 = pipe_config->dpll.m1;
7706 bestm2 = pipe_config->dpll.m2 >> 22;
7707 bestp1 = pipe_config->dpll.p1;
7708 bestp2 = pipe_config->dpll.p2;
7709 vco = pipe_config->dpll.vco;
7710 dpio_val = 0;
7711 loopfilter = 0;
7712
7713 vlv_dpio_get(dev_priv);
7714
7715 /* p1 and p2 divider */
7716 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7717 5 << DPIO_CHV_S1_DIV_SHIFT |
7718 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7719 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7720 1 << DPIO_CHV_K_DIV_SHIFT);
7721
7722 /* Feedback post-divider - m2 */
7723 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7724
7725 /* Feedback refclk divider - n and m1 */
7726 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7727 DPIO_CHV_M1_DIV_BY_2 |
7728 1 << DPIO_CHV_N_DIV_SHIFT);
7729
7730 /* M2 fraction division */
7731 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7732
7733 /* M2 fraction division enable */
7734 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7735 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7736 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7737 if (bestm2_frac)
7738 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7739 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7740
7741 /* Program digital lock detect threshold */
7742 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7743 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7744 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7745 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7746 if (!bestm2_frac)
7747 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7748 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7749
7750 /* Loop filter */
7751 if (vco == 5400000) {
7752 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7753 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7754 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7755 tribuf_calcntr = 0x9;
7756 } else if (vco <= 6200000) {
7757 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7758 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7759 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7760 tribuf_calcntr = 0x9;
7761 } else if (vco <= 6480000) {
7762 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7763 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7764 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7765 tribuf_calcntr = 0x8;
7766 } else {
7767 /* Not supported. Apply the same limits as in the max case */
7768 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7769 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7770 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7771 tribuf_calcntr = 0;
7772 }
7773 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7774
7775 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7776 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7777 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7778 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7779
7780 /* AFC Recal */
7781 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7782 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7783 DPIO_AFC_RECAL);
7784
7785 vlv_dpio_put(dev_priv);
7786 }
7787
7788 /**
7789 * vlv_force_pll_on - forcibly enable just the PLL
7790 * @dev_priv: i915 private structure
7791 * @pipe: pipe PLL to enable
7792 * @dpll: PLL configuration
7793 *
7794 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7795 * in cases where we need the PLL enabled even when @pipe is not going to
7796 * be enabled.
7797 */
7798 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7799 const struct dpll *dpll)
7800 {
7801 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7802 struct intel_crtc_state *pipe_config;
7803
7804 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7805 if (!pipe_config)
7806 return -ENOMEM;
7807
7808 pipe_config->base.crtc = &crtc->base;
7809 pipe_config->pixel_multiplier = 1;
7810 pipe_config->dpll = *dpll;
7811
7812 if (IS_CHERRYVIEW(dev_priv)) {
7813 chv_compute_dpll(crtc, pipe_config);
7814 chv_prepare_pll(crtc, pipe_config);
7815 chv_enable_pll(crtc, pipe_config);
7816 } else {
7817 vlv_compute_dpll(crtc, pipe_config);
7818 vlv_prepare_pll(crtc, pipe_config);
7819 vlv_enable_pll(crtc, pipe_config);
7820 }
7821
7822 kfree(pipe_config);
7823
7824 return 0;
7825 }
7826
7827 /**
7828 * vlv_force_pll_off - forcibly disable just the PLL
7829 * @dev_priv: i915 private structure
7830 * @pipe: pipe PLL to disable
7831 *
7832 * Disable the PLL for @pipe. To be used to undo a previous
7833 * vlv_force_pll_on(), i.e. where the PLL was enabled even though
7834 * @pipe itself was not.
7834 */
7835 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7836 {
7837 if (IS_CHERRYVIEW(dev_priv))
7838 chv_disable_pll(dev_priv, pipe);
7839 else
7840 vlv_disable_pll(dev_priv, pipe);
7841 }
7842
7843 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7844 struct intel_crtc_state *crtc_state,
7845 struct dpll *reduced_clock)
7846 {
7847 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7848 u32 dpll;
7849 struct dpll *clock = &crtc_state->dpll;
7850
7851 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7852
7853 dpll = DPLL_VGA_MODE_DIS;
7854
7855 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7856 dpll |= DPLLB_MODE_LVDS;
7857 else
7858 dpll |= DPLLB_MODE_DAC_SERIAL;
7859
7860 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7861 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7862 dpll |= (crtc_state->pixel_multiplier - 1)
7863 << SDVO_MULTIPLIER_SHIFT_HIRES;
7864 }
7865
7866 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7867 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7868 dpll |= DPLL_SDVO_HIGH_SPEED;
7869
7870 if (intel_crtc_has_dp_encoder(crtc_state))
7871 dpll |= DPLL_SDVO_HIGH_SPEED;
7872
7873 /* compute bitmask from p1 value */
7874 if (IS_PINEVIEW(dev_priv))
7875 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7876 else {
7877 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7878 if (IS_G4X(dev_priv) && reduced_clock)
7879 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7880 }
7881 switch (clock->p2) {
7882 case 5:
7883 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7884 break;
7885 case 7:
7886 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7887 break;
7888 case 10:
7889 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7890 break;
7891 case 14:
7892 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7893 break;
7894 }
7895 if (INTEL_GEN(dev_priv) >= 4)
7896 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7897
7898 if (crtc_state->sdvo_tv_clock)
7899 dpll |= PLL_REF_INPUT_TVCLKINBC;
7900 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7901 intel_panel_use_ssc(dev_priv))
7902 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7903 else
7904 dpll |= PLL_REF_INPUT_DREFCLK;
7905
7906 dpll |= DPLL_VCO_ENABLE;
7907 crtc_state->dpll_hw_state.dpll = dpll;
7908
7909 if (INTEL_GEN(dev_priv) >= 4) {
7910 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7911 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7912 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7913 }
7914 }
7915
7916 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7917 struct intel_crtc_state *crtc_state,
7918 struct dpll *reduced_clock)
7919 {
7920 struct drm_device *dev = crtc->base.dev;
7921 struct drm_i915_private *dev_priv = to_i915(dev);
7922 u32 dpll;
7923 struct dpll *clock = &crtc_state->dpll;
7924
7925 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7926
7927 dpll = DPLL_VGA_MODE_DIS;
7928
7929 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7930 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7931 } else {
7932 if (clock->p1 == 2)
7933 dpll |= PLL_P1_DIVIDE_BY_TWO;
7934 else
7935 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7936 if (clock->p2 == 4)
7937 dpll |= PLL_P2_DIVIDE_BY_4;
7938 }
7939
7940 /*
7941 * Bspec:
7942 * "[Almador Errata}: For the correct operation of the muxed DVO pins
7943 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7944 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7945 * Enable) must be set to “1” in both the DPLL A Control Register
7946 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7947 *
7948 * For simplicity we keep both bits always enabled in
7949 * both DPLLs. The spec says we should disable the DVO 2X clock
7950 * when not needed, but this seems to work fine in practice.
7951 */
7952 if (IS_I830(dev_priv) ||
7953 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7954 dpll |= DPLL_DVO_2X_MODE;
7955
7956 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7957 intel_panel_use_ssc(dev_priv))
7958 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7959 else
7960 dpll |= PLL_REF_INPUT_DREFCLK;
7961
7962 dpll |= DPLL_VCO_ENABLE;
7963 crtc_state->dpll_hw_state.dpll = dpll;
7964 }
7965
7966 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7967 {
7968 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7970 enum pipe pipe = crtc->pipe;
7971 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7972 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7973 u32 crtc_vtotal, crtc_vblank_end;
7974 int vsyncshift = 0;
7975
7976 /* We need to be careful not to change the adjusted mode, for otherwise
7977 * the hw state checker will get angry at the mismatch. */
7978 crtc_vtotal = adjusted_mode->crtc_vtotal;
7979 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7980
7981 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7982 /* the chip adds 2 halflines automatically */
7983 crtc_vtotal -= 1;
7984 crtc_vblank_end -= 1;
7985
7986 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7987 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7988 else
7989 vsyncshift = adjusted_mode->crtc_hsync_start -
7990 adjusted_mode->crtc_htotal / 2;
7991 if (vsyncshift < 0)
7992 vsyncshift += adjusted_mode->crtc_htotal;
7993 }
7994
7995 if (INTEL_GEN(dev_priv) > 3)
7996 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7997
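/*
 * All of the timing registers below take (value - 1), with the later
 * (end/total) field packed into the upper 16 bits.
 */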
7998 I915_WRITE(HTOTAL(cpu_transcoder),
7999 (adjusted_mode->crtc_hdisplay - 1) |
8000 ((adjusted_mode->crtc_htotal - 1) << 16));
8001 I915_WRITE(HBLANK(cpu_transcoder),
8002 (adjusted_mode->crtc_hblank_start - 1) |
8003 ((adjusted_mode->crtc_hblank_end - 1) << 16));
8004 I915_WRITE(HSYNC(cpu_transcoder),
8005 (adjusted_mode->crtc_hsync_start - 1) |
8006 ((adjusted_mode->crtc_hsync_end - 1) << 16));
8007
8008 I915_WRITE(VTOTAL(cpu_transcoder),
8009 (adjusted_mode->crtc_vdisplay - 1) |
8010 ((crtc_vtotal - 1) << 16));
8011 I915_WRITE(VBLANK(cpu_transcoder),
8012 (adjusted_mode->crtc_vblank_start - 1) |
8013 ((crtc_vblank_end - 1) << 16));
8014 I915_WRITE(VSYNC(cpu_transcoder),
8015 (adjusted_mode->crtc_vsync_start - 1) |
8016 ((adjusted_mode->crtc_vsync_end - 1) << 16));
8017
8018 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8019 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8020 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8021 * bits. */
8022 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8023 (pipe == PIPE_B || pipe == PIPE_C))
8024 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
8026 }
8027
8028 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8029 {
8030 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8031 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8032 enum pipe pipe = crtc->pipe;
8033
8034 /* pipesrc controls the size that is scaled from, which should
8035 * always be the user's requested size.
8036 */
8037 I915_WRITE(PIPESRC(pipe),
8038 ((crtc_state->pipe_src_w - 1) << 16) |
8039 (crtc_state->pipe_src_h - 1));
8040 }
8041
8042 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8043 struct intel_crtc_state *pipe_config)
8044 {
8045 struct drm_device *dev = crtc->base.dev;
8046 struct drm_i915_private *dev_priv = to_i915(dev);
8047 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8048 u32 tmp;
8049
8050 tmp = I915_READ(HTOTAL(cpu_transcoder));
8051 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8052 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8053
8054 if (!transcoder_is_dsi(cpu_transcoder)) {
8055 tmp = I915_READ(HBLANK(cpu_transcoder));
8056 pipe_config->base.adjusted_mode.crtc_hblank_start =
8057 (tmp & 0xffff) + 1;
8058 pipe_config->base.adjusted_mode.crtc_hblank_end =
8059 ((tmp >> 16) & 0xffff) + 1;
8060 }
8061 tmp = I915_READ(HSYNC(cpu_transcoder));
8062 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8063 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8064
8065 tmp = I915_READ(VTOTAL(cpu_transcoder));
8066 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8067 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8068
8069 if (!transcoder_is_dsi(cpu_transcoder)) {
8070 tmp = I915_READ(VBLANK(cpu_transcoder));
8071 pipe_config->base.adjusted_mode.crtc_vblank_start =
8072 (tmp & 0xffff) + 1;
8073 pipe_config->base.adjusted_mode.crtc_vblank_end =
8074 ((tmp >> 16) & 0xffff) + 1;
8075 }
8076 tmp = I915_READ(VSYNC(cpu_transcoder));
8077 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8078 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8079
8080 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
8081 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8082 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8083 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8084 }
8085 }
8086
8087 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8088 struct intel_crtc_state *pipe_config)
8089 {
8090 struct drm_device *dev = crtc->base.dev;
8091 struct drm_i915_private *dev_priv = to_i915(dev);
8092 u32 tmp;
8093
8094 tmp = I915_READ(PIPESRC(crtc->pipe));
8095 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8096 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8097
8098 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8099 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8100 }
8101
8102 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8103 struct intel_crtc_state *pipe_config)
8104 {
8105 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8106 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8107 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8108 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8109
8110 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8111 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8112 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8113 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8114
8115 mode->flags = pipe_config->base.adjusted_mode.flags;
8116 mode->type = DRM_MODE_TYPE_DRIVER;
8117
8118 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8119
8120 mode->hsync = drm_mode_hsync(mode);
8121 mode->vrefresh = drm_mode_vrefresh(mode);
8122 drm_mode_set_name(mode);
8123 }
8124
8125 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8126 {
8127 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8128 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8129 u32 pipeconf;
8130
8131 pipeconf = 0;
8132
8133 /* we keep both pipes enabled on 830 */
8134 if (IS_I830(dev_priv))
8135 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8136
8137 if (crtc_state->double_wide)
8138 pipeconf |= PIPECONF_DOUBLE_WIDE;
8139
8140 /* only g4x and later have fancy bpc/dither controls */
8141 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8142 IS_CHERRYVIEW(dev_priv)) {
8143 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8144 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8145 pipeconf |= PIPECONF_DITHER_EN |
8146 PIPECONF_DITHER_TYPE_SP;
8147
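/* pipe_bpp counts all three channels: 18/24/30 bpp == 6/8/10 bpc */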
8148 switch (crtc_state->pipe_bpp) {
8149 case 18:
8150 pipeconf |= PIPECONF_6BPC;
8151 break;
8152 case 24:
8153 pipeconf |= PIPECONF_8BPC;
8154 break;
8155 case 30:
8156 pipeconf |= PIPECONF_10BPC;
8157 break;
8158 default:
8159 /* Case prevented by intel_choose_pipe_bpp_dither. */
8160 BUG();
8161 }
8162 }
8163
8164 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8165 if (INTEL_GEN(dev_priv) < 4 ||
8166 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8167 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8168 else
8169 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8170 } else {
8171 pipeconf |= PIPECONF_PROGRESSIVE;
8172 }
8173
8174 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8175 crtc_state->limited_color_range)
8176 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8177
8178 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8179
8180 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8181 POSTING_READ(PIPECONF(crtc->pipe));
8182 }
8183
8184 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8185 struct intel_crtc_state *crtc_state)
8186 {
8187 struct drm_device *dev = crtc->base.dev;
8188 struct drm_i915_private *dev_priv = to_i915(dev);
8189 const struct intel_limit *limit;
8190 int refclk = 48000;
8191
8192 memset(&crtc_state->dpll_hw_state, 0,
8193 sizeof(crtc_state->dpll_hw_state));
8194
8195 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8196 if (intel_panel_use_ssc(dev_priv)) {
8197 refclk = dev_priv->vbt.lvds_ssc_freq;
8198 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8199 }
8200
8201 limit = &intel_limits_i8xx_lvds;
8202 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8203 limit = &intel_limits_i8xx_dvo;
8204 } else {
8205 limit = &intel_limits_i8xx_dac;
8206 }
8207
8208 if (!crtc_state->clock_set &&
8209 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8210 refclk, NULL, &crtc_state->dpll)) {
8211 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8212 return -EINVAL;
8213 }
8214
8215 i8xx_compute_dpll(crtc, crtc_state, NULL);
8216
8217 return 0;
8218 }
8219
8220 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8221 struct intel_crtc_state *crtc_state)
8222 {
8223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8224 const struct intel_limit *limit;
8225 int refclk = 96000;
8226
8227 memset(&crtc_state->dpll_hw_state, 0,
8228 sizeof(crtc_state->dpll_hw_state));
8229
8230 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8231 if (intel_panel_use_ssc(dev_priv)) {
8232 refclk = dev_priv->vbt.lvds_ssc_freq;
8233 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8234 }
8235
8236 if (intel_is_dual_link_lvds(dev_priv))
8237 limit = &intel_limits_g4x_dual_channel_lvds;
8238 else
8239 limit = &intel_limits_g4x_single_channel_lvds;
8240 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8241 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8242 limit = &intel_limits_g4x_hdmi;
8243 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8244 limit = &intel_limits_g4x_sdvo;
8245 } else {
8246 /* Use the i9xx SDVO limits for all other output types */
8247 limit = &intel_limits_i9xx_sdvo;
8248 }
8249
8250 if (!crtc_state->clock_set &&
8251 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8252 refclk, NULL, &crtc_state->dpll)) {
8253 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8254 return -EINVAL;
8255 }
8256
8257 i9xx_compute_dpll(crtc, crtc_state, NULL);
8258
8259 return 0;
8260 }
8261
8262 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8263 struct intel_crtc_state *crtc_state)
8264 {
8265 struct drm_device *dev = crtc->base.dev;
8266 struct drm_i915_private *dev_priv = to_i915(dev);
8267 const struct intel_limit *limit;
8268 int refclk = 96000;
8269
8270 memset(&crtc_state->dpll_hw_state, 0,
8271 sizeof(crtc_state->dpll_hw_state));
8272
8273 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8274 if (intel_panel_use_ssc(dev_priv)) {
8275 refclk = dev_priv->vbt.lvds_ssc_freq;
8276 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8277 }
8278
8279 limit = &intel_limits_pineview_lvds;
8280 } else {
8281 limit = &intel_limits_pineview_sdvo;
8282 }
8283
8284 if (!crtc_state->clock_set &&
8285 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8286 refclk, NULL, &crtc_state->dpll)) {
8287 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8288 return -EINVAL;
8289 }
8290
8291 i9xx_compute_dpll(crtc, crtc_state, NULL);
8292
8293 return 0;
8294 }
8295
8296 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8297 struct intel_crtc_state *crtc_state)
8298 {
8299 struct drm_device *dev = crtc->base.dev;
8300 struct drm_i915_private *dev_priv = to_i915(dev);
8301 const struct intel_limit *limit;
8302 int refclk = 96000;
8303
8304 memset(&crtc_state->dpll_hw_state, 0,
8305 sizeof(crtc_state->dpll_hw_state));
8306
8307 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8308 if (intel_panel_use_ssc(dev_priv)) {
8309 refclk = dev_priv->vbt.lvds_ssc_freq;
8310 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8311 }
8312
8313 limit = &intel_limits_i9xx_lvds;
8314 } else {
8315 limit = &intel_limits_i9xx_sdvo;
8316 }
8317
8318 if (!crtc_state->clock_set &&
8319 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8320 refclk, NULL, &crtc_state->dpll)) {
8321 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8322 return -EINVAL;
8323 }
8324
8325 i9xx_compute_dpll(crtc, crtc_state, NULL);
8326
8327 return 0;
8328 }
8329
8330 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8331 struct intel_crtc_state *crtc_state)
8332 {
8333 int refclk = 100000;
8334 const struct intel_limit *limit = &intel_limits_chv;
8335
8336 memset(&crtc_state->dpll_hw_state, 0,
8337 sizeof(crtc_state->dpll_hw_state));
8338
8339 if (!crtc_state->clock_set &&
8340 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8341 refclk, NULL, &crtc_state->dpll)) {
8342 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8343 return -EINVAL;
8344 }
8345
8346 chv_compute_dpll(crtc, crtc_state);
8347
8348 return 0;
8349 }
8350
8351 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8352 struct intel_crtc_state *crtc_state)
8353 {
8354 int refclk = 100000;
8355 const struct intel_limit *limit = &intel_limits_vlv;
8356
8357 memset(&crtc_state->dpll_hw_state, 0,
8358 sizeof(crtc_state->dpll_hw_state));
8359
8360 if (!crtc_state->clock_set &&
8361 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8362 refclk, NULL, &crtc_state->dpll)) {
8363 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8364 return -EINVAL;
8365 }
8366
8367 vlv_compute_dpll(crtc, crtc_state);
8368
8369 return 0;
8370 }
8371
8372 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8373 {
8374 if (IS_I830(dev_priv))
8375 return false;
8376
8377 return INTEL_GEN(dev_priv) >= 4 ||
8378 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8379 }
8380
8381 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8382 struct intel_crtc_state *pipe_config)
8383 {
8384 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8385 u32 tmp;
8386
8387 if (!i9xx_has_pfit(dev_priv))
8388 return;
8389
8390 tmp = I915_READ(PFIT_CONTROL);
8391 if (!(tmp & PFIT_ENABLE))
8392 return;
8393
8394 /* Check whether the pfit is attached to our pipe. */
8395 if (INTEL_GEN(dev_priv) < 4) {
8396 if (crtc->pipe != PIPE_B)
8397 return;
8398 } else {
8399 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8400 return;
8401 }
8402
8403 pipe_config->gmch_pfit.control = tmp;
8404 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8405 }
8406
8407 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8408 struct intel_crtc_state *pipe_config)
8409 {
8410 struct drm_device *dev = crtc->base.dev;
8411 struct drm_i915_private *dev_priv = to_i915(dev);
8412 int pipe = pipe_config->cpu_transcoder;
8413 struct dpll clock;
8414 u32 mdiv;
8415 int refclk = 100000;
8416
8417 /* In case of DSI, DPLL will not be used */
8418 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8419 return;
8420
8421 vlv_dpio_get(dev_priv);
8422 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8423 vlv_dpio_put(dev_priv);
8424
8425 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8426 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8427 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8428 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8429 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8430
8431 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8432 }
8433
8434 static void
8435 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8436 struct intel_initial_plane_config *plane_config)
8437 {
8438 struct drm_device *dev = crtc->base.dev;
8439 struct drm_i915_private *dev_priv = to_i915(dev);
8440 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8441 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8442 enum pipe pipe;
8443 u32 val, base, offset;
8444 int fourcc, pixel_format;
8445 unsigned int aligned_height;
8446 struct drm_framebuffer *fb;
8447 struct intel_framebuffer *intel_fb;
8448
8449 if (!plane->get_hw_state(plane, &pipe))
8450 return;
8451
8452 WARN_ON(pipe != crtc->pipe);
8453
8454 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8455 if (!intel_fb) {
8456 DRM_DEBUG_KMS("failed to alloc fb\n");
8457 return;
8458 }
8459
8460 fb = &intel_fb->base;
8461
8462 fb->dev = dev;
8463
8464 val = I915_READ(DSPCNTR(i9xx_plane));
8465
8466 if (INTEL_GEN(dev_priv) >= 4) {
8467 if (val & DISPPLANE_TILED) {
8468 plane_config->tiling = I915_TILING_X;
8469 fb->modifier = I915_FORMAT_MOD_X_TILED;
8470 }
8471
8472 if (val & DISPPLANE_ROTATE_180)
8473 plane_config->rotation = DRM_MODE_ROTATE_180;
8474 }
8475
8476 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8477 val & DISPPLANE_MIRROR)
8478 plane_config->rotation |= DRM_MODE_REFLECT_X;
8479
8480 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8481 fourcc = i9xx_format_to_fourcc(pixel_format);
8482 fb->format = drm_format_info(fourcc);
8483
8484 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8485 offset = I915_READ(DSPOFFSET(i9xx_plane));
8486 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8487 } else if (INTEL_GEN(dev_priv) >= 4) {
8488 if (plane_config->tiling)
8489 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8490 else
8491 offset = I915_READ(DSPLINOFF(i9xx_plane));
8492 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8493 } else {
8494 base = I915_READ(DSPADDR(i9xx_plane));
8495 }
8496 plane_config->base = base;
8497
8498 val = I915_READ(PIPESRC(pipe));
8499 fb->width = ((val >> 16) & 0xfff) + 1;
8500 fb->height = ((val >> 0) & 0xfff) + 1;
8501
8502 val = I915_READ(DSPSTRIDE(i9xx_plane));
8503 fb->pitches[0] = val & 0xffffffc0;
8504
8505 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8506
8507 plane_config->size = fb->pitches[0] * aligned_height;
8508
8509 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8510 crtc->base.name, plane->base.name, fb->width, fb->height,
8511 fb->format->cpp[0] * 8, base, fb->pitches[0],
8512 plane_config->size);
8513
8514 plane_config->fb = intel_fb;
8515 }
8516
8517 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8518 struct intel_crtc_state *pipe_config)
8519 {
8520 struct drm_device *dev = crtc->base.dev;
8521 struct drm_i915_private *dev_priv = to_i915(dev);
8522 int pipe = pipe_config->cpu_transcoder;
8523 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8524 struct dpll clock;
8525 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8526 int refclk = 100000;
8527
8528 /* In case of DSI, DPLL will not be used */
8529 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8530 return;
8531
8532 vlv_dpio_get(dev_priv);
8533 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8534 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8535 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8536 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8537 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8538 vlv_dpio_put(dev_priv);
8539
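/*
 * m2 is held as an 8.22 fixed-point value: the integer part lives in
 * PLL_DW0 and the 22-bit fraction in PLL_DW2 when fractional division
 * is enabled (mirroring the split done in chv_prepare_pll()).
 */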
8540 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8541 clock.m2 = (pll_dw0 & 0xff) << 22;
8542 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8543 clock.m2 |= pll_dw2 & 0x3fffff;
8544 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8545 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8546 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8547
8548 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8549 }
8550
8551 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8552 struct intel_crtc_state *pipe_config)
8553 {
8554 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8555 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8556
8557 pipe_config->lspcon_downsampling = false;
8558
8559 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8560 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8561
8562 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8563 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8564 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8565
8566 if (ycbcr420_enabled) {
8567 /* We support 4:2:0 in full blend mode only */
8568 if (!blend)
8569 output = INTEL_OUTPUT_FORMAT_INVALID;
8570 else if (!(IS_GEMINILAKE(dev_priv) ||
8571 INTEL_GEN(dev_priv) >= 10))
8572 output = INTEL_OUTPUT_FORMAT_INVALID;
8573 else
8574 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8575 } else {
8576 /*
8577 * Currently there is no interface defined to
8578 * check user preference between RGB/YCBCR444
8579 * or YCBCR420. So the only possible case for
8580 * YCBCR444 usage is driving YCBCR420 output
8581 * with LSPCON, when pipe is configured for
8582 * YCBCR444 output and LSPCON takes care of
8583 * downsampling it.
8584 */
8585 pipe_config->lspcon_downsampling = true;
8586 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8587 }
8588 }
8589 }
8590
8591 pipe_config->output_format = output;
8592 }
8593
8594 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8595 {
8596 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8597 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8598 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8599 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8600 u32 tmp;
8601
8602 tmp = I915_READ(DSPCNTR(i9xx_plane));
8603
8604 if (tmp & DISPPLANE_GAMMA_ENABLE)
8605 crtc_state->gamma_enable = true;
8606
8607 if (!HAS_GMCH(dev_priv) &&
8608 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8609 crtc_state->csc_enable = true;
8610 }
8611
8612 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8613 struct intel_crtc_state *pipe_config)
8614 {
8615 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8616 enum intel_display_power_domain power_domain;
8617 intel_wakeref_t wakeref;
8618 u32 tmp;
8619 bool ret;
8620
8621 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8622 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8623 if (!wakeref)
8624 return false;
8625
8626 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8627 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8628 pipe_config->shared_dpll = NULL;
8629
8630 ret = false;
8631
8632 tmp = I915_READ(PIPECONF(crtc->pipe));
8633 if (!(tmp & PIPECONF_ENABLE))
8634 goto out;
8635
8636 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8637 IS_CHERRYVIEW(dev_priv)) {
8638 switch (tmp & PIPECONF_BPC_MASK) {
8639 case PIPECONF_6BPC:
8640 pipe_config->pipe_bpp = 18;
8641 break;
8642 case PIPECONF_8BPC:
8643 pipe_config->pipe_bpp = 24;
8644 break;
8645 case PIPECONF_10BPC:
8646 pipe_config->pipe_bpp = 30;
8647 break;
8648 default:
8649 break;
8650 }
8651 }
8652
8653 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8654 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8655 pipe_config->limited_color_range = true;
8656
8657 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8658 PIPECONF_GAMMA_MODE_SHIFT;
8659
8660 if (IS_CHERRYVIEW(dev_priv))
8661 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8662
8663 i9xx_get_pipe_color_config(pipe_config);
8664 intel_color_get_config(pipe_config);
8665
8666 if (INTEL_GEN(dev_priv) < 4)
8667 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8668
8669 intel_get_pipe_timings(crtc, pipe_config);
8670 intel_get_pipe_src_size(crtc, pipe_config);
8671
8672 i9xx_get_pfit_config(crtc, pipe_config);
8673
8674 if (INTEL_GEN(dev_priv) >= 4) {
8675 /* No way to read it out on pipes B and C */
8676 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8677 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8678 else
8679 tmp = I915_READ(DPLL_MD(crtc->pipe));
8680 pipe_config->pixel_multiplier =
8681 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8682 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8683 pipe_config->dpll_hw_state.dpll_md = tmp;
8684 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8685 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8686 tmp = I915_READ(DPLL(crtc->pipe));
8687 pipe_config->pixel_multiplier =
8688 ((tmp & SDVO_MULTIPLIER_MASK)
8689 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8690 } else {
8691 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8692 * port and will be fixed up in the encoder->get_config
8693 * function. */
8694 pipe_config->pixel_multiplier = 1;
8695 }
8696 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8697 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8698 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8699 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8700 } else {
8701 /* Mask out read-only status bits. */
8702 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8703 DPLL_PORTC_READY_MASK |
8704 DPLL_PORTB_READY_MASK);
8705 }
8706
8707 if (IS_CHERRYVIEW(dev_priv))
8708 chv_crtc_clock_get(crtc, pipe_config);
8709 else if (IS_VALLEYVIEW(dev_priv))
8710 vlv_crtc_clock_get(crtc, pipe_config);
8711 else
8712 i9xx_crtc_clock_get(crtc, pipe_config);
8713
8714 /*
8715 * Normally the dotclock is filled in by the encoder .get_config()
8716 * but in case the pipe is enabled w/o any ports we need a sane
8717 * default.
8718 */
8719 pipe_config->base.adjusted_mode.crtc_clock =
8720 pipe_config->port_clock / pipe_config->pixel_multiplier;
8721
8722 ret = true;
8723
8724 out:
8725 intel_display_power_put(dev_priv, power_domain, wakeref);
8726
8727 return ret;
8728 }
8729
8730 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8731 {
8732 struct intel_encoder *encoder;
8733 int i;
8734 u32 val, final;
8735 bool has_lvds = false;
8736 bool has_cpu_edp = false;
8737 bool has_panel = false;
8738 bool has_ck505 = false;
8739 bool can_ssc = false;
8740 bool using_ssc_source = false;
8741
8742 /* We need to take the global config into account */
8743 for_each_intel_encoder(&dev_priv->drm, encoder) {
8744 switch (encoder->type) {
8745 case INTEL_OUTPUT_LVDS:
8746 has_panel = true;
8747 has_lvds = true;
8748 break;
8749 case INTEL_OUTPUT_EDP:
8750 has_panel = true;
8751 if (encoder->port == PORT_A)
8752 has_cpu_edp = true;
8753 break;
8754 default:
8755 break;
8756 }
8757 }
8758
8759 if (HAS_PCH_IBX(dev_priv)) {
8760 has_ck505 = dev_priv->vbt.display_clock_mode;
8761 can_ssc = has_ck505;
8762 } else {
8763 has_ck505 = false;
8764 can_ssc = true;
8765 }
8766
8767 /* Check if any DPLLs are using the SSC source */
8768 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8769 u32 temp = I915_READ(PCH_DPLL(i));
8770
8771 if (!(temp & DPLL_VCO_ENABLE))
8772 continue;
8773
8774 if ((temp & PLL_REF_INPUT_MASK) ==
8775 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8776 using_ssc_source = true;
8777 break;
8778 }
8779 }
8780
8781 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8782 has_panel, has_lvds, has_ck505, using_ssc_source);
8783
8784 /* Ironlake: try to setup display ref clock before DPLL
8785 * enabling. This is only under driver's control after
8786 * PCH B stepping; earlier chipset steppings should ignore this setting.
8788 */
8789 val = I915_READ(PCH_DREF_CONTROL);
8790
8791 /* As we must carefully and slowly disable/enable each source in turn,
8792 * compute the final state we want first and check if we need to
8793 * make any changes at all.
8794 */
8795 final = val;
8796 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8797 if (has_ck505)
8798 final |= DREF_NONSPREAD_CK505_ENABLE;
8799 else
8800 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8801
8802 final &= ~DREF_SSC_SOURCE_MASK;
8803 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8804 final &= ~DREF_SSC1_ENABLE;
8805
8806 if (has_panel) {
8807 final |= DREF_SSC_SOURCE_ENABLE;
8808
8809 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8810 final |= DREF_SSC1_ENABLE;
8811
8812 if (has_cpu_edp) {
8813 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8814 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8815 else
8816 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8817 } else
8818 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8819 } else if (using_ssc_source) {
8820 final |= DREF_SSC_SOURCE_ENABLE;
8821 final |= DREF_SSC1_ENABLE;
8822 }
8823
8824 if (final == val)
8825 return;
8826
8827 /* Always enable nonspread source */
8828 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8829
8830 if (has_ck505)
8831 val |= DREF_NONSPREAD_CK505_ENABLE;
8832 else
8833 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8834
8835 if (has_panel) {
8836 val &= ~DREF_SSC_SOURCE_MASK;
8837 val |= DREF_SSC_SOURCE_ENABLE;
8838
8839 /* SSC must be turned on before enabling the CPU output */
8840 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8841 DRM_DEBUG_KMS("Using SSC on panel\n");
8842 val |= DREF_SSC1_ENABLE;
8843 } else
8844 val &= ~DREF_SSC1_ENABLE;
8845
8846 /* Get SSC going before enabling the outputs */
8847 I915_WRITE(PCH_DREF_CONTROL, val);
8848 POSTING_READ(PCH_DREF_CONTROL);
8849 udelay(200);
8850
8851 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8852
8853 /* Enable CPU source on CPU attached eDP */
8854 if (has_cpu_edp) {
8855 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8856 DRM_DEBUG_KMS("Using SSC on eDP\n");
8857 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8858 } else
8859 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8860 } else
8861 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8862
8863 I915_WRITE(PCH_DREF_CONTROL, val);
8864 POSTING_READ(PCH_DREF_CONTROL);
8865 udelay(200);
8866 } else {
8867 DRM_DEBUG_KMS("Disabling CPU source output\n");
8868
8869 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8870
8871 /* Turn off CPU output */
8872 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8873
8874 I915_WRITE(PCH_DREF_CONTROL, val);
8875 POSTING_READ(PCH_DREF_CONTROL);
8876 udelay(200);
8877
8878 if (!using_ssc_source) {
8879 DRM_DEBUG_KMS("Disabling SSC source\n");
8880
8881 /* Turn off the SSC source */
8882 val &= ~DREF_SSC_SOURCE_MASK;
8883 val |= DREF_SSC_SOURCE_DISABLE;
8884
8885 /* Turn off SSC1 */
8886 val &= ~DREF_SSC1_ENABLE;
8887
8888 I915_WRITE(PCH_DREF_CONTROL, val);
8889 POSTING_READ(PCH_DREF_CONTROL);
8890 udelay(200);
8891 }
8892 }
8893
8894 BUG_ON(val != final);
8895 }
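/*
 * Illustrative sketch of the state machine above (example values, not a
 * BSpec sequence): for an LVDS panel with SSC enabled plus a CPU eDP on
 * port A, and assuming no CK505 clock chip, the target value composes as
 *
 *	final = DREF_NONSPREAD_SOURCE_ENABLE |
 *		DREF_SSC_SOURCE_ENABLE |
 *		DREF_SSC1_ENABLE |
 *		DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 *
 * and the code then walks the hardware from 'val' to 'final' one source
 * at a time, with a posting read and a ~200 us settle after each write.
 */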
8896
8897 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8898 {
8899 u32 tmp;
8900
8901 tmp = I915_READ(SOUTH_CHICKEN2);
8902 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8903 I915_WRITE(SOUTH_CHICKEN2, tmp);
8904
8905 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8906 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8907 DRM_ERROR("FDI mPHY reset assert timeout\n");
8908
8909 tmp = I915_READ(SOUTH_CHICKEN2);
8910 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8911 I915_WRITE(SOUTH_CHICKEN2, tmp);
8912
8913 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8914 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8915 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8916 }
8917
8918 /* WaMPhyProgramming:hsw */
8919 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8920 {
8921 u32 tmp;
8922
8923 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8924 tmp &= ~(0xFF << 24);
8925 tmp |= (0x12 << 24);
8926 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8927
8928 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8929 tmp |= (1 << 11);
8930 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8931
8932 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8933 tmp |= (1 << 11);
8934 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8935
8936 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8937 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8938 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8939
8940 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8941 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8942 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8943
8944 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8945 tmp &= ~(7 << 13);
8946 tmp |= (5 << 13);
8947 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8948
8949 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8950 tmp &= ~(7 << 13);
8951 tmp |= (5 << 13);
8952 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8953
8954 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8955 tmp &= ~0xFF;
8956 tmp |= 0x1C;
8957 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8958
8959 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8960 tmp &= ~0xFF;
8961 tmp |= 0x1C;
8962 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8963
8964 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8965 tmp &= ~(0xFF << 16);
8966 tmp |= (0x1C << 16);
8967 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8968
8969 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8970 tmp &= ~(0xFF << 16);
8971 tmp |= (0x1C << 16);
8972 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8973
8974 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8975 tmp |= (1 << 27);
8976 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8977
8978 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8979 tmp |= (1 << 27);
8980 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8981
8982 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8983 tmp &= ~(0xF << 28);
8984 tmp |= (4 << 28);
8985 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8986
8987 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8988 tmp &= ~(0xF << 28);
8989 tmp |= (4 << 28);
8990 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8991 }
8992
8993 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8994 * Programming" based on the parameters passed:
8995 * - Sequence to enable CLKOUT_DP
8996 * - Sequence to enable CLKOUT_DP without spread
8997 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8998 */
8999 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9000 bool with_spread, bool with_fdi)
9001 {
9002 u32 reg, tmp;
9003
9004 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9005 with_spread = true;
9006 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9007 with_fdi, "LP PCH doesn't have FDI\n"))
9008 with_fdi = false;
9009
9010 mutex_lock(&dev_priv->sb_lock);
9011
9012 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9013 tmp &= ~SBI_SSCCTL_DISABLE;
9014 tmp |= SBI_SSCCTL_PATHALT;
9015 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9016
9017 udelay(24);
9018
9019 if (with_spread) {
9020 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9021 tmp &= ~SBI_SSCCTL_PATHALT;
9022 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9023
9024 if (with_fdi) {
9025 lpt_reset_fdi_mphy(dev_priv);
9026 lpt_program_fdi_mphy(dev_priv);
9027 }
9028 }
9029
9030 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9031 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9032 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9033 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9034
9035 mutex_unlock(&dev_priv->sb_lock);
9036 }
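/*
 * How the parameters map onto the three sequences listed above (a
 * reading of this function, not an additional BSpec requirement):
 *
 *	with_spread=false, with_fdi=false  ->  CLKOUT_DP without spread
 *	with_spread=true,  with_fdi=false  ->  CLKOUT_DP with spread
 *	with_spread=true,  with_fdi=true   ->  CLKOUT_DP for FDI usage
 *
 * The remaining combination (FDI without spread) is corrected by the
 * WARN at the top, since FDI requires downspread.
 */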
9037
9038 /* Sequence to disable CLKOUT_DP */
9039 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9040 {
9041 u32 reg, tmp;
9042
9043 mutex_lock(&dev_priv->sb_lock);
9044
9045 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9046 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9047 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9048 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9049
9050 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9051 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9052 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9053 tmp |= SBI_SSCCTL_PATHALT;
9054 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9055 udelay(32);
9056 }
9057 tmp |= SBI_SSCCTL_DISABLE;
9058 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9059 }
9060
9061 mutex_unlock(&dev_priv->sb_lock);
9062 }
9063
9064 #define BEND_IDX(steps) ((50 + (steps)) / 5)
9065
9066 static const u16 sscdivintphase[] = {
9067 [BEND_IDX( 50)] = 0x3B23,
9068 [BEND_IDX( 45)] = 0x3B23,
9069 [BEND_IDX( 40)] = 0x3C23,
9070 [BEND_IDX( 35)] = 0x3C23,
9071 [BEND_IDX( 30)] = 0x3D23,
9072 [BEND_IDX( 25)] = 0x3D23,
9073 [BEND_IDX( 20)] = 0x3E23,
9074 [BEND_IDX( 15)] = 0x3E23,
9075 [BEND_IDX( 10)] = 0x3F23,
9076 [BEND_IDX( 5)] = 0x3F23,
9077 [BEND_IDX( 0)] = 0x0025,
9078 [BEND_IDX( -5)] = 0x0025,
9079 [BEND_IDX(-10)] = 0x0125,
9080 [BEND_IDX(-15)] = 0x0125,
9081 [BEND_IDX(-20)] = 0x0225,
9082 [BEND_IDX(-25)] = 0x0225,
9083 [BEND_IDX(-30)] = 0x0325,
9084 [BEND_IDX(-35)] = 0x0325,
9085 [BEND_IDX(-40)] = 0x0425,
9086 [BEND_IDX(-45)] = 0x0425,
9087 [BEND_IDX(-50)] = 0x0525,
9088 };
9089
9090 /*
9091 * Bend CLKOUT_DP
9092 * steps -50 to 50 inclusive, in steps of 5
9093 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9094 * change in clock period = -(steps / 10) * 5.787 ps
9095 */
9096 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9097 {
9098 u32 tmp;
9099 int idx = BEND_IDX(steps);
9100
9101 if (WARN_ON(steps % 5 != 0))
9102 return;
9103
9104 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9105 return;
9106
9107 mutex_lock(&dev_priv->sb_lock);
9108
9109 if (steps % 10 != 0)
9110 tmp = 0xAAAAAAAB;
9111 else
9112 tmp = 0x00000000;
9113 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9114
9115 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9116 tmp &= 0xffff0000;
9117 tmp |= sscdivintphase[idx];
9118 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9119
9120 mutex_unlock(&dev_priv->sb_lock);
9121 }
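/*
 * Worked example, derived from the table and formula above (taking the
 * formula's division as exact): lpt_bend_clkout_dp(dev_priv, -15) gives
 * idx = BEND_IDX(-15) = (50 - 15) / 5 = 7, i.e. sscdivintphase value
 * 0x0125. As -15 is not a multiple of 10, the dither word 0xAAAAAAAB is
 * programmed first. The resulting period change is
 * -(-15 / 10) * 5.787 ps = +8.68 ps, a slightly slower clock, matching
 * the "< 0 slow down" rule.
 */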
9122
9123 #undef BEND_IDX
9124
9125 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9126 {
9127 struct intel_encoder *encoder;
9128 bool has_vga = false;
9129
9130 for_each_intel_encoder(&dev_priv->drm, encoder) {
9131 switch (encoder->type) {
9132 case INTEL_OUTPUT_ANALOG:
9133 has_vga = true;
9134 break;
9135 default:
9136 break;
9137 }
9138 }
9139
9140 if (has_vga) {
9141 lpt_bend_clkout_dp(dev_priv, 0);
9142 lpt_enable_clkout_dp(dev_priv, true, true);
9143 } else {
9144 lpt_disable_clkout_dp(dev_priv);
9145 }
9146 }
9147
9148 /*
9149 * Initialize reference clocks when the driver loads
9150 */
9151 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9152 {
9153 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9154 ironlake_init_pch_refclk(dev_priv);
9155 else if (HAS_PCH_LPT(dev_priv))
9156 lpt_init_pch_refclk(dev_priv);
9157 }
9158
9159 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9160 {
9161 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9162 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9163 enum pipe pipe = crtc->pipe;
9164 u32 val;
9165
9166 val = 0;
9167
9168 switch (crtc_state->pipe_bpp) {
9169 case 18:
9170 val |= PIPECONF_6BPC;
9171 break;
9172 case 24:
9173 val |= PIPECONF_8BPC;
9174 break;
9175 case 30:
9176 val |= PIPECONF_10BPC;
9177 break;
9178 case 36:
9179 val |= PIPECONF_12BPC;
9180 break;
9181 default:
9182 /* Case prevented by intel_choose_pipe_bpp_dither. */
9183 BUG();
9184 }
9185
9186 if (crtc_state->dither)
9187 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9188
9189 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9190 val |= PIPECONF_INTERLACED_ILK;
9191 else
9192 val |= PIPECONF_PROGRESSIVE;
9193
9194 if (crtc_state->limited_color_range)
9195 val |= PIPECONF_COLOR_RANGE_SELECT;
9196
9197 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9198
9199 I915_WRITE(PIPECONF(pipe), val);
9200 POSTING_READ(PIPECONF(pipe));
9201 }
9202
9203 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9204 {
9205 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9206 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9207 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9208 u32 val = 0;
9209
9210 if (IS_HASWELL(dev_priv) && crtc_state->dither)
9211 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9212
9213 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9214 val |= PIPECONF_INTERLACED_ILK;
9215 else
9216 val |= PIPECONF_PROGRESSIVE;
9217
9218 I915_WRITE(PIPECONF(cpu_transcoder), val);
9219 POSTING_READ(PIPECONF(cpu_transcoder));
9220 }
9221
9222 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9223 {
9224 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9225 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9226 u32 val = 0;
9227
9228 switch (crtc_state->pipe_bpp) {
9229 case 18:
9230 val |= PIPEMISC_DITHER_6_BPC;
9231 break;
9232 case 24:
9233 val |= PIPEMISC_DITHER_8_BPC;
9234 break;
9235 case 30:
9236 val |= PIPEMISC_DITHER_10_BPC;
9237 break;
9238 case 36:
9239 val |= PIPEMISC_DITHER_12_BPC;
9240 break;
9241 default:
9242 MISSING_CASE(crtc_state->pipe_bpp);
9243 break;
9244 }
9245
9246 if (crtc_state->dither)
9247 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9248
9249 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9250 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9251 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9252
9253 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9254 val |= PIPEMISC_YUV420_ENABLE |
9255 PIPEMISC_YUV420_MODE_FULL_BLEND;
9256
9257 if (INTEL_GEN(dev_priv) >= 11 &&
9258 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9259 BIT(PLANE_CURSOR))) == 0)
9260 val |= PIPEMISC_HDR_MODE_PRECISION;
9261
9262 I915_WRITE(PIPEMISC(crtc->pipe), val);
9263 }
9264
9265 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9266 {
9267 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9268 u32 tmp;
9269
9270 tmp = I915_READ(PIPEMISC(crtc->pipe));
9271
9272 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9273 case PIPEMISC_DITHER_6_BPC:
9274 return 18;
9275 case PIPEMISC_DITHER_8_BPC:
9276 return 24;
9277 case PIPEMISC_DITHER_10_BPC:
9278 return 30;
9279 case PIPEMISC_DITHER_12_BPC:
9280 return 36;
9281 default:
9282 MISSING_CASE(tmp);
9283 return 0;
9284 }
9285 }
9286
9287 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9288 {
9289 /*
9290 * Account for spread spectrum to avoid
9291 * oversubscribing the link. Max center spread
9292 * is 2.5%; use 5% for safety's sake.
9293 */
9294 u32 bps = target_clock * bpp * 21 / 20;
9295 return DIV_ROUND_UP(bps, link_bw * 8);
9296 }
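/*
 * Worked example with hypothetical numbers: a 1920x1080@60 mode
 * (target_clock = 148500 kHz) at 24 bpp on a 2.7 GHz FDI link
 * (link_bw = 270000) needs
 *
 *	bps   = 148500 * 24 * 21 / 20 = 3742200
 *	lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
 *
 * so two lanes suffice even with the 5% spread margin included.
 */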
9297
9298 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9299 {
9300 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9301 }
9302
9303 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9304 struct intel_crtc_state *crtc_state,
9305 struct dpll *reduced_clock)
9306 {
9307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9308 u32 dpll, fp, fp2;
9309 int factor;
9310
9311 /* Enable autotuning of the PLL clock (if permissible) */
9312 factor = 21;
9313 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9314 if ((intel_panel_use_ssc(dev_priv) &&
9315 dev_priv->vbt.lvds_ssc_freq == 100000) ||
9316 (HAS_PCH_IBX(dev_priv) &&
9317 intel_is_dual_link_lvds(dev_priv)))
9318 factor = 25;
9319 } else if (crtc_state->sdvo_tv_clock) {
9320 factor = 20;
9321 }
9322
9323 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9324
9325 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9326 fp |= FP_CB_TUNE;
9327
9328 if (reduced_clock) {
9329 fp2 = i9xx_dpll_compute_fp(reduced_clock);
9330
9331 if (reduced_clock->m < factor * reduced_clock->n)
9332 fp2 |= FP_CB_TUNE;
9333 } else {
9334 fp2 = fp;
9335 }
9336
9337 dpll = 0;
9338
9339 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9340 dpll |= DPLLB_MODE_LVDS;
9341 else
9342 dpll |= DPLLB_MODE_DAC_SERIAL;
9343
9344 dpll |= (crtc_state->pixel_multiplier - 1)
9345 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9346
9347 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9348 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9349 dpll |= DPLL_SDVO_HIGH_SPEED;
9350
9351 if (intel_crtc_has_dp_encoder(crtc_state))
9352 dpll |= DPLL_SDVO_HIGH_SPEED;
9353
9354 /*
9355 * The high speed IO clock is only really required for
9356 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9357 * possible to share the DPLL between CRT and HDMI. Enabling
9358 * the clock needlessly does no real harm, except potentially
9359 * using up a bit of power.
9360 *
9361 * We'll limit this to IVB with 3 pipes, since it has only two
9362 * DPLLs and so DPLL sharing is the only way to get three pipes
9363 * driving PCH ports at the same time. On SNB we could do this,
9364 * and potentially avoid enabling the second DPLL, but it's not
9365 * clear if it's a win or a loss power-wise. No point in doing
9366 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9367 */
9368 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9369 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9370 dpll |= DPLL_SDVO_HIGH_SPEED;
9371
9372 /* compute bitmask from p1 value */
9373 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9374 /* also FPA1 */
9375 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9376
9377 switch (crtc_state->dpll.p2) {
9378 case 5:
9379 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9380 break;
9381 case 7:
9382 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9383 break;
9384 case 10:
9385 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9386 break;
9387 case 14:
9388 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9389 break;
9390 }
9391
9392 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9393 intel_panel_use_ssc(dev_priv))
9394 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9395 else
9396 dpll |= PLL_REF_INPUT_DREFCLK;
9397
9398 dpll |= DPLL_VCO_ENABLE;
9399
9400 crtc_state->dpll_hw_state.dpll = dpll;
9401 crtc_state->dpll_hw_state.fp0 = fp;
9402 crtc_state->dpll_hw_state.fp1 = fp2;
9403 }
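/*
 * Note on the P1 encoding above (inferred from the code, not a BSpec
 * quote): the post divider is programmed one-hot, so e.g. dpll.p1 == 3
 * sets bit (1 << 2) in both the FPA0 and FPA1 P1 fields; programming
 * FPA1 identically keeps the reduced (downclocked) configuration
 * consistent with the primary one.
 */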
9404
9405 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9406 struct intel_crtc_state *crtc_state)
9407 {
9408 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9409 const struct intel_limit *limit;
9410 int refclk = 120000;
9411
9412 memset(&crtc_state->dpll_hw_state, 0,
9413 sizeof(crtc_state->dpll_hw_state));
9414
9415 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9416 if (!crtc_state->has_pch_encoder)
9417 return 0;
9418
9419 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9420 if (intel_panel_use_ssc(dev_priv)) {
9421 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9422 dev_priv->vbt.lvds_ssc_freq);
9423 refclk = dev_priv->vbt.lvds_ssc_freq;
9424 }
9425
9426 if (intel_is_dual_link_lvds(dev_priv)) {
9427 if (refclk == 100000)
9428 limit = &intel_limits_ironlake_dual_lvds_100m;
9429 else
9430 limit = &intel_limits_ironlake_dual_lvds;
9431 } else {
9432 if (refclk == 100000)
9433 limit = &intel_limits_ironlake_single_lvds_100m;
9434 else
9435 limit = &intel_limits_ironlake_single_lvds;
9436 }
9437 } else {
9438 limit = &intel_limits_ironlake_dac;
9439 }
9440
9441 if (!crtc_state->clock_set &&
9442 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9443 refclk, NULL, &crtc_state->dpll)) {
9444 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9445 return -EINVAL;
9446 }
9447
9448 ironlake_compute_dpll(crtc, crtc_state, NULL);
9449
9450 if (!intel_get_shared_dpll(crtc_state, NULL)) {
9451 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9452 pipe_name(crtc->pipe));
9453 return -EINVAL;
9454 }
9455
9456 return 0;
9457 }
9458
9459 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9460 struct intel_link_m_n *m_n)
9461 {
9462 struct drm_device *dev = crtc->base.dev;
9463 struct drm_i915_private *dev_priv = to_i915(dev);
9464 enum pipe pipe = crtc->pipe;
9465
9466 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9467 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9468 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9469 & ~TU_SIZE_MASK;
9470 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9471 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9472 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9473 }
9474
9475 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9476 enum transcoder transcoder,
9477 struct intel_link_m_n *m_n,
9478 struct intel_link_m_n *m2_n2)
9479 {
9480 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9481 enum pipe pipe = crtc->pipe;
9482
9483 if (INTEL_GEN(dev_priv) >= 5) {
9484 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9485 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9486 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9487 & ~TU_SIZE_MASK;
9488 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9489 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9490 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9491
9492 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9493 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9494 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9495 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9496 & ~TU_SIZE_MASK;
9497 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9498 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9499 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9500 }
9501 } else {
9502 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9503 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9504 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9505 & ~TU_SIZE_MASK;
9506 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9507 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9508 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9509 }
9510 }
9511
9512 void intel_dp_get_m_n(struct intel_crtc *crtc,
9513 struct intel_crtc_state *pipe_config)
9514 {
9515 if (pipe_config->has_pch_encoder)
9516 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9517 else
9518 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9519 &pipe_config->dp_m_n,
9520 &pipe_config->dp_m2_n2);
9521 }
9522
9523 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9524 struct intel_crtc_state *pipe_config)
9525 {
9526 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9527 &pipe_config->fdi_m_n, NULL);
9528 }
9529
9530 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9531 struct intel_crtc_state *pipe_config)
9532 {
9533 struct drm_device *dev = crtc->base.dev;
9534 struct drm_i915_private *dev_priv = to_i915(dev);
9535 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9536 u32 ps_ctrl = 0;
9537 int id = -1;
9538 int i;
9539
9540 /* find scaler attached to this pipe */
9541 for (i = 0; i < crtc->num_scalers; i++) {
9542 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9543 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9544 id = i;
9545 pipe_config->pch_pfit.enabled = true;
9546 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9547 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9548 scaler_state->scalers[i].in_use = true;
9549 break;
9550 }
9551 }
9552
9553 scaler_state->scaler_id = id;
9554 if (id >= 0) {
9555 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9556 } else {
9557 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9558 }
9559 }
9560
9561 static void
9562 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9563 struct intel_initial_plane_config *plane_config)
9564 {
9565 struct drm_device *dev = crtc->base.dev;
9566 struct drm_i915_private *dev_priv = to_i915(dev);
9567 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9568 enum plane_id plane_id = plane->id;
9569 enum pipe pipe;
9570 u32 val, base, offset, stride_mult, tiling, alpha;
9571 int fourcc, pixel_format;
9572 unsigned int aligned_height;
9573 struct drm_framebuffer *fb;
9574 struct intel_framebuffer *intel_fb;
9575
9576 if (!plane->get_hw_state(plane, &pipe))
9577 return;
9578
9579 WARN_ON(pipe != crtc->pipe);
9580
9581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9582 if (!intel_fb) {
9583 DRM_DEBUG_KMS("failed to alloc fb\n");
9584 return;
9585 }
9586
9587 fb = &intel_fb->base;
9588
9589 fb->dev = dev;
9590
9591 val = I915_READ(PLANE_CTL(pipe, plane_id));
9592
9593 if (INTEL_GEN(dev_priv) >= 11)
9594 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9595 else
9596 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9597
9598 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9599 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9600 alpha &= PLANE_COLOR_ALPHA_MASK;
9601 } else {
9602 alpha = val & PLANE_CTL_ALPHA_MASK;
9603 }
9604
9605 fourcc = skl_format_to_fourcc(pixel_format,
9606 val & PLANE_CTL_ORDER_RGBX, alpha);
9607 fb->format = drm_format_info(fourcc);
9608
9609 tiling = val & PLANE_CTL_TILED_MASK;
9610 switch (tiling) {
9611 case PLANE_CTL_TILED_LINEAR:
9612 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9613 break;
9614 case PLANE_CTL_TILED_X:
9615 plane_config->tiling = I915_TILING_X;
9616 fb->modifier = I915_FORMAT_MOD_X_TILED;
9617 break;
9618 case PLANE_CTL_TILED_Y:
9619 plane_config->tiling = I915_TILING_Y;
9620 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9621 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9622 else
9623 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9624 break;
9625 case PLANE_CTL_TILED_YF:
9626 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9627 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9628 else
9629 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9630 break;
9631 default:
9632 MISSING_CASE(tiling);
9633 goto error;
9634 }
9635
9636 /*
9637 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
9638 * while i915 HW rotation is clockwise; that's why we swap them here.
9639 */
9640 switch (val & PLANE_CTL_ROTATE_MASK) {
9641 case PLANE_CTL_ROTATE_0:
9642 plane_config->rotation = DRM_MODE_ROTATE_0;
9643 break;
9644 case PLANE_CTL_ROTATE_90:
9645 plane_config->rotation = DRM_MODE_ROTATE_270;
9646 break;
9647 case PLANE_CTL_ROTATE_180:
9648 plane_config->rotation = DRM_MODE_ROTATE_180;
9649 break;
9650 case PLANE_CTL_ROTATE_270:
9651 plane_config->rotation = DRM_MODE_ROTATE_90;
9652 break;
9653 }
9654
9655 if (INTEL_GEN(dev_priv) >= 10 &&
9656 val & PLANE_CTL_FLIP_HORIZONTAL)
9657 plane_config->rotation |= DRM_MODE_REFLECT_X;
9658
9659 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9660 plane_config->base = base;
9661
9662 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9663
9664 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9665 fb->height = ((val >> 16) & 0xfff) + 1;
9666 fb->width = ((val >> 0) & 0x1fff) + 1;
9667
9668 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9669 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9670 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9671
9672 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9673
9674 plane_config->size = fb->pitches[0] * aligned_height;
9675
9676 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9677 crtc->base.name, plane->base.name, fb->width, fb->height,
9678 fb->format->cpp[0] * 8, base, fb->pitches[0],
9679 plane_config->size);
9680
9681 plane_config->fb = intel_fb;
9682 return;
9683
9684 error:
9685 kfree(intel_fb);
9686 }
9687
9688 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9689 struct intel_crtc_state *pipe_config)
9690 {
9691 struct drm_device *dev = crtc->base.dev;
9692 struct drm_i915_private *dev_priv = to_i915(dev);
9693 u32 tmp;
9694
9695 tmp = I915_READ(PF_CTL(crtc->pipe));
9696
9697 if (tmp & PF_ENABLE) {
9698 pipe_config->pch_pfit.enabled = true;
9699 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9700 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9701
9702 /* We currently do not free assignments of panel fitters on
9703 * ivb/hsw (since we don't use the higher upscaling modes which
9704 * differentiate them), so just WARN about this case for now. */
9705 if (IS_GEN(dev_priv, 7)) {
9706 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9707 PF_PIPE_SEL_IVB(crtc->pipe));
9708 }
9709 }
9710 }
9711
9712 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9713 struct intel_crtc_state *pipe_config)
9714 {
9715 struct drm_device *dev = crtc->base.dev;
9716 struct drm_i915_private *dev_priv = to_i915(dev);
9717 enum intel_display_power_domain power_domain;
9718 intel_wakeref_t wakeref;
9719 u32 tmp;
9720 bool ret;
9721
9722 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9723 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9724 if (!wakeref)
9725 return false;
9726
9727 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9728 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9729 pipe_config->shared_dpll = NULL;
9730
9731 ret = false;
9732 tmp = I915_READ(PIPECONF(crtc->pipe));
9733 if (!(tmp & PIPECONF_ENABLE))
9734 goto out;
9735
9736 switch (tmp & PIPECONF_BPC_MASK) {
9737 case PIPECONF_6BPC:
9738 pipe_config->pipe_bpp = 18;
9739 break;
9740 case PIPECONF_8BPC:
9741 pipe_config->pipe_bpp = 24;
9742 break;
9743 case PIPECONF_10BPC:
9744 pipe_config->pipe_bpp = 30;
9745 break;
9746 case PIPECONF_12BPC:
9747 pipe_config->pipe_bpp = 36;
9748 break;
9749 default:
9750 break;
9751 }
9752
9753 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9754 pipe_config->limited_color_range = true;
9755
9756 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9757 PIPECONF_GAMMA_MODE_SHIFT;
9758
9759 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9760
9761 i9xx_get_pipe_color_config(pipe_config);
9762 intel_color_get_config(pipe_config);
9763
9764 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9765 struct intel_shared_dpll *pll;
9766 enum intel_dpll_id pll_id;
9767
9768 pipe_config->has_pch_encoder = true;
9769
9770 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9771 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9772 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9773
9774 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9775
9776 if (HAS_PCH_IBX(dev_priv)) {
9777 /*
9778 * The pipe->pch transcoder and pch transcoder->pll
9779 * mapping is fixed.
9780 */
9781 pll_id = (enum intel_dpll_id) crtc->pipe;
9782 } else {
9783 tmp = I915_READ(PCH_DPLL_SEL);
9784 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9785 pll_id = DPLL_ID_PCH_PLL_B;
9786 else
9787 pll_id = DPLL_ID_PCH_PLL_A;
9788 }
9789
9790 pipe_config->shared_dpll =
9791 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9792 pll = pipe_config->shared_dpll;
9793
9794 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9795 &pipe_config->dpll_hw_state));
9796
9797 tmp = pipe_config->dpll_hw_state.dpll;
9798 pipe_config->pixel_multiplier =
9799 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9800 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9801
9802 ironlake_pch_clock_get(crtc, pipe_config);
9803 } else {
9804 pipe_config->pixel_multiplier = 1;
9805 }
9806
9807 intel_get_pipe_timings(crtc, pipe_config);
9808 intel_get_pipe_src_size(crtc, pipe_config);
9809
9810 ironlake_get_pfit_config(crtc, pipe_config);
9811
9812 ret = true;
9813
9814 out:
9815 intel_display_power_put(dev_priv, power_domain, wakeref);
9816
9817 return ret;
9818 }

9819 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9820 struct intel_crtc_state *crtc_state)
9821 {
9822 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9823 struct intel_atomic_state *state =
9824 to_intel_atomic_state(crtc_state->base.state);
9825
9826 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9827 INTEL_GEN(dev_priv) >= 11) {
9828 struct intel_encoder *encoder =
9829 intel_get_crtc_new_encoder(state, crtc_state);
9830
9831 if (!intel_get_shared_dpll(crtc_state, encoder)) {
9832 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9833 pipe_name(crtc->pipe));
9834 return -EINVAL;
9835 }
9836 }
9837
9838 return 0;
9839 }
9840
9841 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9842 enum port port,
9843 struct intel_crtc_state *pipe_config)
9844 {
9845 enum intel_dpll_id id;
9846 u32 temp;
9847
9848 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9849 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9850
9851 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9852 return;
9853
9854 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9855 }
9856
9857 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9858 enum port port,
9859 struct intel_crtc_state *pipe_config)
9860 {
9861 enum intel_dpll_id id;
9862 u32 temp;
9863
9864 /* TODO: TBT pll not implemented. */
9865 if (intel_port_is_combophy(dev_priv, port)) {
9866 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9867 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9868 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9869 } else if (intel_port_is_tc(dev_priv, port)) {
9870 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9871 } else {
9872 WARN(1, "Invalid port %x\n", port);
9873 return;
9874 }
9875
9876 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9877 }
9878
9879 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9880 enum port port,
9881 struct intel_crtc_state *pipe_config)
9882 {
9883 enum intel_dpll_id id;
9884
9885 switch (port) {
9886 case PORT_A:
9887 id = DPLL_ID_SKL_DPLL0;
9888 break;
9889 case PORT_B:
9890 id = DPLL_ID_SKL_DPLL1;
9891 break;
9892 case PORT_C:
9893 id = DPLL_ID_SKL_DPLL2;
9894 break;
9895 default:
9896 DRM_ERROR("Incorrect port type\n");
9897 return;
9898 }
9899
9900 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9901 }
9902
9903 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9904 enum port port,
9905 struct intel_crtc_state *pipe_config)
9906 {
9907 enum intel_dpll_id id;
9908 u32 temp;
9909
9910 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9911 id = temp >> (port * 3 + 1);
9912
9913 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9914 return;
9915
9916 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9917 }
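/*
 * Decode sketch, assuming the DPLL_CTRL2 layout from i915_reg.h where
 * each port owns a 3-bit group with the 2-bit clock-select field
 * starting at bit (port * 3 + 1): for PORT_B with DPLL1 selected,
 * temp = DPLL_CTRL2 & DPLL_CTRL2_DDI_CLK_SEL_MASK(PORT_B) is 1 << 4,
 * and id = temp >> (1 * 3 + 1) = 1 = SKL_DPLL1.
 */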
9918
9919 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9920 enum port port,
9921 struct intel_crtc_state *pipe_config)
9922 {
9923 enum intel_dpll_id id;
9924 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9925
9926 switch (ddi_pll_sel) {
9927 case PORT_CLK_SEL_WRPLL1:
9928 id = DPLL_ID_WRPLL1;
9929 break;
9930 case PORT_CLK_SEL_WRPLL2:
9931 id = DPLL_ID_WRPLL2;
9932 break;
9933 case PORT_CLK_SEL_SPLL:
9934 id = DPLL_ID_SPLL;
9935 break;
9936 case PORT_CLK_SEL_LCPLL_810:
9937 id = DPLL_ID_LCPLL_810;
9938 break;
9939 case PORT_CLK_SEL_LCPLL_1350:
9940 id = DPLL_ID_LCPLL_1350;
9941 break;
9942 case PORT_CLK_SEL_LCPLL_2700:
9943 id = DPLL_ID_LCPLL_2700;
9944 break;
9945 default:
9946 MISSING_CASE(ddi_pll_sel);
9947 /* fall through */
9948 case PORT_CLK_SEL_NONE:
9949 return;
9950 }
9951
9952 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9953 }
9954
9955 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9956 struct intel_crtc_state *pipe_config,
9957 u64 *power_domain_mask,
9958 intel_wakeref_t *wakerefs)
9959 {
9960 struct drm_device *dev = crtc->base.dev;
9961 struct drm_i915_private *dev_priv = to_i915(dev);
9962 enum intel_display_power_domain power_domain;
9963 unsigned long panel_transcoder_mask = 0;
9964 unsigned long enabled_panel_transcoders = 0;
9965 enum transcoder panel_transcoder;
9966 intel_wakeref_t wf;
9967 u32 tmp;
9968
9969 if (INTEL_GEN(dev_priv) >= 11)
9970 panel_transcoder_mask |=
9971 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
9972
9973 if (HAS_TRANSCODER_EDP(dev_priv))
9974 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
9975
9976 /*
9977 * The pipe->transcoder mapping is fixed with the exception of the eDP
9978 * and DSI transcoders handled below.
9979 */
9980 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9981
9982 /*
9983 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9984 * consistency and less surprising code; it's in always on power).
9985 */
9986 for_each_set_bit(panel_transcoder,
9987 &panel_transcoder_mask,
9988 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
9989 bool force_thru = false;
9990 enum pipe trans_pipe;
9991
9992 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9993 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9994 continue;
9995
9996 /*
9997 * Log all enabled ones, only use the first one.
9998 *
9999 * FIXME: This won't work for two separate DSI displays.
10000 */
10001 enabled_panel_transcoders |= BIT(panel_transcoder);
10002 if (enabled_panel_transcoders != BIT(panel_transcoder))
10003 continue;
10004
10005 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10006 default:
10007 WARN(1, "unknown pipe linked to transcoder %s\n",
10008 transcoder_name(panel_transcoder));
10009 /* fall through */
10010 case TRANS_DDI_EDP_INPUT_A_ONOFF:
10011 force_thru = true;
10012 /* fall through */
10013 case TRANS_DDI_EDP_INPUT_A_ON:
10014 trans_pipe = PIPE_A;
10015 break;
10016 case TRANS_DDI_EDP_INPUT_B_ONOFF:
10017 trans_pipe = PIPE_B;
10018 break;
10019 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10020 trans_pipe = PIPE_C;
10021 break;
10022 }
10023
10024 if (trans_pipe == crtc->pipe) {
10025 pipe_config->cpu_transcoder = panel_transcoder;
10026 pipe_config->pch_pfit.force_thru = force_thru;
10027 }
10028 }
10029
10030 /*
10031 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10032 */
10033 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10034 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10035
10036 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10037 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10038
10039 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10040 if (!wf)
10041 return false;
10042
10043 wakerefs[power_domain] = wf;
10044 *power_domain_mask |= BIT_ULL(power_domain);
10045
10046 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10047
10048 return tmp & PIPECONF_ENABLE;
10049 }
10050
10051 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10052 struct intel_crtc_state *pipe_config,
10053 u64 *power_domain_mask,
10054 intel_wakeref_t *wakerefs)
10055 {
10056 struct drm_device *dev = crtc->base.dev;
10057 struct drm_i915_private *dev_priv = to_i915(dev);
10058 enum intel_display_power_domain power_domain;
10059 enum transcoder cpu_transcoder;
10060 intel_wakeref_t wf;
10061 enum port port;
10062 u32 tmp;
10063
10064 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10065 if (port == PORT_A)
10066 cpu_transcoder = TRANSCODER_DSI_A;
10067 else
10068 cpu_transcoder = TRANSCODER_DSI_C;
10069
10070 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10071 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10072
10073 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10074 if (!wf)
10075 continue;
10076
10077 wakerefs[power_domain] = wf;
10078 *power_domain_mask |= BIT_ULL(power_domain);
10079
10080 /*
10081 * The PLL needs to be enabled with a valid divider
10082 * configuration, otherwise accessing DSI registers will hang
10083 * the machine. See BSpec North Display Engine
10084 * registers/MIPI[BXT]. We can break out here early, since we
10085 * need the same DSI PLL to be enabled for both DSI ports.
10086 */
10087 if (!bxt_dsi_pll_is_enabled(dev_priv))
10088 break;
10089
10090 /* XXX: this works for video mode only */
10091 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10092 if (!(tmp & DPI_ENABLE))
10093 continue;
10094
10095 tmp = I915_READ(MIPI_CTRL(port));
10096 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10097 continue;
10098
10099 pipe_config->cpu_transcoder = cpu_transcoder;
10100 break;
10101 }
10102
10103 return transcoder_is_dsi(pipe_config->cpu_transcoder);
10104 }
10105
10106 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10107 struct intel_crtc_state *pipe_config)
10108 {
10109 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10110 struct intel_shared_dpll *pll;
10111 enum port port;
10112 u32 tmp;
10113
10114 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10115
10116 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
10117
10118 if (INTEL_GEN(dev_priv) >= 11)
10119 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10120 else if (IS_CANNONLAKE(dev_priv))
10121 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10122 else if (IS_GEN9_BC(dev_priv))
10123 skylake_get_ddi_pll(dev_priv, port, pipe_config);
10124 else if (IS_GEN9_LP(dev_priv))
10125 bxt_get_ddi_pll(dev_priv, port, pipe_config);
10126 else
10127 haswell_get_ddi_pll(dev_priv, port, pipe_config);
10128
10129 pll = pipe_config->shared_dpll;
10130 if (pll) {
10131 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10132 &pipe_config->dpll_hw_state));
10133 }
10134
10135 /*
10136 * Haswell has only FDI/PCH transcoder A, which is connected to
10137 * DDI E. So just check whether this pipe is wired to DDI E and whether
10138 * the PCH transcoder is on.
10139 */
10140 if (INTEL_GEN(dev_priv) < 9 &&
10141 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10142 pipe_config->has_pch_encoder = true;
10143
10144 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10145 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10146 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10147
10148 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10149 }
10150 }
10151
10152 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10153 struct intel_crtc_state *pipe_config)
10154 {
10155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10156 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10157 enum intel_display_power_domain power_domain;
10158 u64 power_domain_mask;
10159 bool active;
10160
10161 intel_crtc_init_scalers(crtc, pipe_config);
10162
10163 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10164 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10165 if (!wf)
10166 return false;
10167
10168 wakerefs[power_domain] = wf;
10169 power_domain_mask = BIT_ULL(power_domain);
10170
10171 pipe_config->shared_dpll = NULL;
10172
10173 active = hsw_get_transcoder_state(crtc, pipe_config,
10174 &power_domain_mask, wakerefs);
10175
10176 if (IS_GEN9_LP(dev_priv) &&
10177 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10178 &power_domain_mask, wakerefs)) {
10179 WARN_ON(active);
10180 active = true;
10181 }
10182
10183 if (!active)
10184 goto out;
10185
10186 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10187 INTEL_GEN(dev_priv) >= 11) {
10188 haswell_get_ddi_port_state(crtc, pipe_config);
10189 intel_get_pipe_timings(crtc, pipe_config);
10190 }
10191
10192 intel_get_pipe_src_size(crtc, pipe_config);
10193 intel_get_crtc_ycbcr_config(crtc, pipe_config);
10194
10195 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10196
10197 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10198
10199 if (INTEL_GEN(dev_priv) >= 9) {
10200 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10201
10202 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10203 pipe_config->gamma_enable = true;
10204
10205 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10206 pipe_config->csc_enable = true;
10207 } else {
10208 i9xx_get_pipe_color_config(pipe_config);
10209 }
10210
10211 intel_color_get_config(pipe_config);
10212
10213 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10214 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10215
10216 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10217 if (wf) {
10218 wakerefs[power_domain] = wf;
10219 power_domain_mask |= BIT_ULL(power_domain);
10220
10221 if (INTEL_GEN(dev_priv) >= 9)
10222 skylake_get_pfit_config(crtc, pipe_config);
10223 else
10224 ironlake_get_pfit_config(crtc, pipe_config);
10225 }
10226
10227 if (hsw_crtc_supports_ips(crtc)) {
10228 if (IS_HASWELL(dev_priv))
10229 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10230 else {
10231 /*
10232 * We cannot readout IPS state on broadwell, set to
10233 * true so we can set it to a defined state on first
10234 * commit.
10235 */
10236 pipe_config->ips_enabled = true;
10237 }
10238 }
10239
10240 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10241 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10242 pipe_config->pixel_multiplier =
10243 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10244 } else {
10245 pipe_config->pixel_multiplier = 1;
10246 }
10247
10248 out:
10249 for_each_power_domain(power_domain, power_domain_mask)
10250 intel_display_power_put(dev_priv,
10251 power_domain, wakerefs[power_domain]);
10252
10253 return active;
10254 }
10255
10256 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10257 {
10258 struct drm_i915_private *dev_priv =
10259 to_i915(plane_state->base.plane->dev);
10260 const struct drm_framebuffer *fb = plane_state->base.fb;
10261 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10262 u32 base;
10263
10264 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10265 base = obj->phys_handle->busaddr;
10266 else
10267 base = intel_plane_ggtt_offset(plane_state);
10268
10269 base += plane_state->color_plane[0].offset;
10270
10271 /* ILK+ do this automagically */
10272 if (HAS_GMCH(dev_priv) &&
10273 plane_state->base.rotation & DRM_MODE_ROTATE_180)
10274 base += (plane_state->base.crtc_h *
10275 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10276
10277 return base;
10278 }
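/*
 * Worked example for the GMCH rotation fixup above (hypothetical
 * values): a 64x64 ARGB cursor (cpp = 4) rotated 180 degrees gets its
 * base bumped by (64 * 64 - 1) * 4 = 16380 bytes, so scanout starts at
 * the last pixel of the buffer, which is where a 180 degree scan
 * begins.
 */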
10279
10280 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10281 {
10282 int x = plane_state->base.crtc_x;
10283 int y = plane_state->base.crtc_y;
10284 u32 pos = 0;
10285
10286 if (x < 0) {
10287 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10288 x = -x;
10289 }
10290 pos |= x << CURSOR_X_SHIFT;
10291
10292 if (y < 0) {
10293 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10294 y = -y;
10295 }
10296 pos |= y << CURSOR_Y_SHIFT;
10297
10298 return pos;
10299 }
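/*
 * The position register uses sign-magnitude encoding; for example a
 * cursor partially off the left edge at crtc_x = -12, crtc_y = 8 is
 * encoded as (illustrative values):
 *
 *	pos = (CURSOR_POS_SIGN << CURSOR_X_SHIFT) |
 *	      (12 << CURSOR_X_SHIFT) |
 *	      (8 << CURSOR_Y_SHIFT);
 */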
10300
10301 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10302 {
10303 const struct drm_mode_config *config =
10304 &plane_state->base.plane->dev->mode_config;
10305 int width = plane_state->base.crtc_w;
10306 int height = plane_state->base.crtc_h;
10307
10308 return width > 0 && width <= config->cursor_width &&
10309 height > 0 && height <= config->cursor_height;
10310 }
10311
10312 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10313 {
10314 int src_x, src_y;
10315 u32 offset;
10316 int ret;
10317
10318 ret = intel_plane_compute_gtt(plane_state);
10319 if (ret)
10320 return ret;
10321
10322 if (!plane_state->base.visible)
10323 return 0;
10324
10325 src_x = plane_state->base.src_x >> 16;
10326 src_y = plane_state->base.src_y >> 16;
10327
10328 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10329 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10330 plane_state, 0);
10331
10332 if (src_x != 0 || src_y != 0) {
10333 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10334 return -EINVAL;
10335 }
10336
10337 plane_state->color_plane[0].offset = offset;
10338
10339 return 0;
10340 }
10341
10342 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10343 struct intel_plane_state *plane_state)
10344 {
10345 const struct drm_framebuffer *fb = plane_state->base.fb;
10346 int ret;
10347
10348 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10349 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10350 return -EINVAL;
10351 }
10352
10353 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10354 &crtc_state->base,
10355 DRM_PLANE_HELPER_NO_SCALING,
10356 DRM_PLANE_HELPER_NO_SCALING,
10357 true, true);
10358 if (ret)
10359 return ret;
10360
10361 ret = intel_cursor_check_surface(plane_state);
10362 if (ret)
10363 return ret;
10364
10365 if (!plane_state->base.visible)
10366 return 0;
10367
10368 ret = intel_plane_check_src_coordinates(plane_state);
10369 if (ret)
10370 return ret;
10371
10372 return 0;
10373 }
10374
10375 static unsigned int
10376 i845_cursor_max_stride(struct intel_plane *plane,
10377 u32 pixel_format, u64 modifier,
10378 unsigned int rotation)
10379 {
10380 return 2048;
10381 }
10382
10383 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10384 {
10385 u32 cntl = 0;
10386
10387 if (crtc_state->gamma_enable)
10388 cntl |= CURSOR_GAMMA_ENABLE;
10389
10390 return cntl;
10391 }
10392
10393 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10394 const struct intel_plane_state *plane_state)
10395 {
10396 return CURSOR_ENABLE |
10397 CURSOR_FORMAT_ARGB |
10398 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10399 }
10400
10401 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10402 {
10403 int width = plane_state->base.crtc_w;
10404
10405 /*
10406 * 845g/865g are only limited by the width of their cursors;
10407 * the height is arbitrary up to the precision of the register.
10408 */
10409 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10410 }
10411
10412 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10413 struct intel_plane_state *plane_state)
10414 {
10415 const struct drm_framebuffer *fb = plane_state->base.fb;
10416 int ret;
10417
10418 ret = intel_check_cursor(crtc_state, plane_state);
10419 if (ret)
10420 return ret;
10421
10422 /* if we want to turn off the cursor ignore width and height */
10423 if (!fb)
10424 return 0;
10425
10426 /* Check for which cursor types we support */
10427 if (!i845_cursor_size_ok(plane_state)) {
10428 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10429 plane_state->base.crtc_w,
10430 plane_state->base.crtc_h);
10431 return -EINVAL;
10432 }
10433
10434 WARN_ON(plane_state->base.visible &&
10435 plane_state->color_plane[0].stride != fb->pitches[0]);
10436
10437 switch (fb->pitches[0]) {
10438 case 256:
10439 case 512:
10440 case 1024:
10441 case 2048:
10442 break;
10443 default:
10444 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10445 fb->pitches[0]);
10446 return -EINVAL;
10447 }
10448
10449 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10450
10451 return 0;
10452 }
10453
10454 static void i845_update_cursor(struct intel_plane *plane,
10455 const struct intel_crtc_state *crtc_state,
10456 const struct intel_plane_state *plane_state)
10457 {
10458 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10459 u32 cntl = 0, base = 0, pos = 0, size = 0;
10460 unsigned long irqflags;
10461
10462 if (plane_state && plane_state->base.visible) {
10463 unsigned int width = plane_state->base.crtc_w;
10464 unsigned int height = plane_state->base.crtc_h;
10465
10466 cntl = plane_state->ctl |
10467 i845_cursor_ctl_crtc(crtc_state);
10468
10469 size = (height << 12) | width;
10470
10471 base = intel_cursor_base(plane_state);
10472 pos = intel_cursor_position(plane_state);
10473 }
10474
10475 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10476
10477 /* On these chipsets we can only modify the base/size/stride
10478 * whilst the cursor is disabled.
10479 */
10480 if (plane->cursor.base != base ||
10481 plane->cursor.size != size ||
10482 plane->cursor.cntl != cntl) {
10483 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10484 I915_WRITE_FW(CURBASE(PIPE_A), base);
10485 I915_WRITE_FW(CURSIZE, size);
10486 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10487 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10488
10489 plane->cursor.base = base;
10490 plane->cursor.size = size;
10491 plane->cursor.cntl = cntl;
10492 } else {
10493 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10494 }
10495
10496 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10497 }
10498
10499 static void i845_disable_cursor(struct intel_plane *plane,
10500 const struct intel_crtc_state *crtc_state)
10501 {
10502 i845_update_cursor(plane, crtc_state, NULL);
10503 }
10504
10505 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10506 enum pipe *pipe)
10507 {
10508 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10509 enum intel_display_power_domain power_domain;
10510 intel_wakeref_t wakeref;
10511 bool ret;
10512
10513 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10514 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10515 if (!wakeref)
10516 return false;
10517
10518 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10519
10520 *pipe = PIPE_A;
10521
10522 intel_display_power_put(dev_priv, power_domain, wakeref);
10523
10524 return ret;
10525 }
10526
10527 static unsigned int
10528 i9xx_cursor_max_stride(struct intel_plane *plane,
10529 u32 pixel_format, u64 modifier,
10530 unsigned int rotation)
10531 {
10532 return plane->base.dev->mode_config.cursor_width * 4;
10533 }
10534
10535 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10536 {
10537 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10538 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10539 u32 cntl = 0;
10540
10541 if (INTEL_GEN(dev_priv) >= 11)
10542 return cntl;
10543
10544 if (crtc_state->gamma_enable)
10545 cntl = MCURSOR_GAMMA_ENABLE;
10546
10547 if (crtc_state->csc_enable)
10548 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10549
10550 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10551 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10552
10553 return cntl;
10554 }
10555
10556 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10557 const struct intel_plane_state *plane_state)
10558 {
10559 struct drm_i915_private *dev_priv =
10560 to_i915(plane_state->base.plane->dev);
10561 u32 cntl = 0;
10562
10563 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10564 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10565
10566 switch (plane_state->base.crtc_w) {
10567 case 64:
10568 cntl |= MCURSOR_MODE_64_ARGB_AX;
10569 break;
10570 case 128:
10571 cntl |= MCURSOR_MODE_128_ARGB_AX;
10572 break;
10573 case 256:
10574 cntl |= MCURSOR_MODE_256_ARGB_AX;
10575 break;
10576 default:
10577 MISSING_CASE(plane_state->base.crtc_w);
10578 return 0;
10579 }
10580
10581 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10582 cntl |= MCURSOR_ROTATE_180;
10583
10584 return cntl;
10585 }
10586
10587 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10588 {
10589 struct drm_i915_private *dev_priv =
10590 to_i915(plane_state->base.plane->dev);
10591 int width = plane_state->base.crtc_w;
10592 int height = plane_state->base.crtc_h;
10593
10594 if (!intel_cursor_size_ok(plane_state))
10595 return false;
10596
10597 /* Cursor width is limited to a few power-of-two sizes */
10598 switch (width) {
10599 case 256:
10600 case 128:
10601 case 64:
10602 break;
10603 default:
10604 return false;
10605 }
10606
10607 /*
10608 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10609 * height from 8 lines up to the cursor width, when the
10610 * cursor is not rotated. Everything else requires square
10611 * cursors.
10612 */
10613 if (HAS_CUR_FBC(dev_priv) &&
10614 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10615 if (height < 8 || height > width)
10616 return false;
10617 } else {
10618 if (height != width)
10619 return false;
10620 }
10621
10622 return true;
10623 }
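/*
 * A few illustrative cases for the rules above: 128x128 passes
 * everywhere (squares are always fine), 128x32 passes only on
 * HAS_CUR_FBC platforms with an unrotated cursor, and 96x96 always
 * fails because 96 is not one of the supported widths.
 */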
10624
10625 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10626 struct intel_plane_state *plane_state)
10627 {
10628 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10629 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10630 const struct drm_framebuffer *fb = plane_state->base.fb;
10631 enum pipe pipe = plane->pipe;
10632 int ret;
10633
10634 ret = intel_check_cursor(crtc_state, plane_state);
10635 if (ret)
10636 return ret;
10637
10638 /* if we want to turn off the cursor ignore width and height */
10639 if (!fb)
10640 return 0;
10641
10642 /* Check for which cursor types we support */
10643 if (!i9xx_cursor_size_ok(plane_state)) {
10644 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10645 plane_state->base.crtc_w,
10646 plane_state->base.crtc_h);
10647 return -EINVAL;
10648 }
10649
10650 WARN_ON(plane_state->base.visible &&
10651 plane_state->color_plane[0].stride != fb->pitches[0]);
10652
10653 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10654 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10655 fb->pitches[0], plane_state->base.crtc_w);
10656 return -EINVAL;
10657 }
10658
10659 /*
10660 * There's something wrong with the cursor on CHV pipe C.
10661 * If it straddles the left edge of the screen then
10662 * moving it away from the edge or disabling it often
10663 * results in a pipe underrun, and often that can lead to
10664 * dead pipe (constant underrun reported, and it scans
10665 * out just a solid color). To recover from that, the
10666 * display power well must be turned off and on again.
10667 * Refuse to put the cursor into that compromised position.
10668 */
10669 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10670 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10671 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10672 return -EINVAL;
10673 }
10674
10675 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10676
10677 return 0;
10678 }
10679
10680 static void i9xx_update_cursor(struct intel_plane *plane,
10681 const struct intel_crtc_state *crtc_state,
10682 const struct intel_plane_state *plane_state)
10683 {
10684 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10685 enum pipe pipe = plane->pipe;
10686 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10687 unsigned long irqflags;
10688
10689 if (plane_state && plane_state->base.visible) {
10690 cntl = plane_state->ctl |
10691 i9xx_cursor_ctl_crtc(crtc_state);
10692
10693 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10694 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10695
10696 base = intel_cursor_base(plane_state);
10697 pos = intel_cursor_position(plane_state);
10698 }
10699
10700 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10701
10702 /*
10703 * On some platforms writing CURCNTR first will also
10704 * cause CURPOS to be armed by the CURBASE write.
10705 * Without the CURCNTR write the CURPOS write would
10706 * arm itself. Thus we always update CURCNTR before
10707 * CURPOS.
10708 *
10709 * On other platforms CURPOS always requires the
10710 * CURBASE write to arm the update. Additionally,
10711 * a write to any of the cursor registers will cancel
10712 * an already armed cursor update. Thus leaving out
10713 * the CURBASE write after CURPOS could lead to a
10714 * cursor that doesn't appear to move, or even change
10715 * shape. Thus we always write CURBASE.
10716 *
10717 * The other registers are armed by the CURBASE write
10718 * except when the plane is getting enabled at which time
10719 * the CURCNTR write arms the update.
10720 */
10721
10722 if (INTEL_GEN(dev_priv) >= 9)
10723 skl_write_cursor_wm(plane, crtc_state);
10724
10725 if (plane->cursor.base != base ||
10726 plane->cursor.size != fbc_ctl ||
10727 plane->cursor.cntl != cntl) {
10728 if (HAS_CUR_FBC(dev_priv))
10729 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10730 I915_WRITE_FW(CURCNTR(pipe), cntl);
10731 I915_WRITE_FW(CURPOS(pipe), pos);
10732 I915_WRITE_FW(CURBASE(pipe), base);
10733
10734 plane->cursor.base = base;
10735 plane->cursor.size = fbc_ctl;
10736 plane->cursor.cntl = cntl;
10737 } else {
10738 I915_WRITE_FW(CURPOS(pipe), pos);
10739 I915_WRITE_FW(CURBASE(pipe), base);
10740 }
10741
10742 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10743 }
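
/*
 * A condensed sketch of the write-ordering contract from the comment
 * in i9xx_update_cursor(), using a hypothetical mmio_write() callback:
 * CURCNTR must never be written after CURPOS, and CURBASE always goes
 * last because it is the write that arms (latches) the whole update.
 */
static void cursor_write_order_sketch(void (*mmio_write)(u32 reg, u32 val),
				      u32 curcntr, u32 curpos, u32 curbase,
				      u32 cntl, u32 pos, u32 base)
{
	mmio_write(curcntr, cntl);	/* control first */
	mmio_write(curpos, pos);	/* position next */
	mmio_write(curbase, base);	/* base last: arms the update */
}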
10744
10745 static void i9xx_disable_cursor(struct intel_plane *plane,
10746 const struct intel_crtc_state *crtc_state)
10747 {
10748 i9xx_update_cursor(plane, crtc_state, NULL);
10749 }
10750
10751 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10752 enum pipe *pipe)
10753 {
10754 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10755 enum intel_display_power_domain power_domain;
10756 intel_wakeref_t wakeref;
10757 bool ret;
10758 u32 val;
10759
10760 /*
10761 * Not 100% correct for planes that can move between pipes,
10762 * but that's only the case for gen2-3 which don't have any
10763 * display power wells.
10764 */
10765 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10766 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10767 if (!wakeref)
10768 return false;
10769
10770 val = I915_READ(CURCNTR(plane->pipe));
10771
10772 ret = val & MCURSOR_MODE;
10773
10774 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10775 *pipe = plane->pipe;
10776 else
10777 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10778 MCURSOR_PIPE_SELECT_SHIFT;
10779
10780 intel_display_power_put(dev_priv, power_domain, wakeref);
10781
10782 return ret;
10783 }
10784
10785 /* VESA 640x480x72Hz mode to set on the pipe */
10786 static const struct drm_display_mode load_detect_mode = {
10787 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10788 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10789 };
10790
10791 struct drm_framebuffer *
10792 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10793 struct drm_mode_fb_cmd2 *mode_cmd)
10794 {
10795 struct intel_framebuffer *intel_fb;
10796 int ret;
10797
10798 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10799 if (!intel_fb)
10800 return ERR_PTR(-ENOMEM);
10801
10802 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10803 if (ret)
10804 goto err;
10805
10806 return &intel_fb->base;
10807
10808 err:
10809 kfree(intel_fb);
10810 return ERR_PTR(ret);
10811 }
10812
10813 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10814 struct drm_crtc *crtc)
10815 {
10816 struct drm_plane *plane;
10817 struct drm_plane_state *plane_state;
10818 int ret, i;
10819
10820 ret = drm_atomic_add_affected_planes(state, crtc);
10821 if (ret)
10822 return ret;
10823
10824 for_each_new_plane_in_state(state, plane, plane_state, i) {
10825 if (plane_state->crtc != crtc)
10826 continue;
10827
10828 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10829 if (ret)
10830 return ret;
10831
10832 drm_atomic_set_fb_for_plane(plane_state, NULL);
10833 }
10834
10835 return 0;
10836 }
10837
10838 int intel_get_load_detect_pipe(struct drm_connector *connector,
10839 const struct drm_display_mode *mode,
10840 struct intel_load_detect_pipe *old,
10841 struct drm_modeset_acquire_ctx *ctx)
10842 {
10843 struct intel_crtc *intel_crtc;
10844 struct intel_encoder *intel_encoder =
10845 intel_attached_encoder(connector);
10846 struct drm_crtc *possible_crtc;
10847 struct drm_encoder *encoder = &intel_encoder->base;
10848 struct drm_crtc *crtc = NULL;
10849 struct drm_device *dev = encoder->dev;
10850 struct drm_i915_private *dev_priv = to_i915(dev);
10851 struct drm_mode_config *config = &dev->mode_config;
10852 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10853 struct drm_connector_state *connector_state;
10854 struct intel_crtc_state *crtc_state;
10855 int ret, i = -1;
10856
10857 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10858 connector->base.id, connector->name,
10859 encoder->base.id, encoder->name);
10860
10861 old->restore_state = NULL;
10862
10863 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10864
10865 /*
10866 * Algorithm gets a little messy:
10867 *
10868 * - if the connector already has an assigned crtc, use it (but make
10869 * sure it's on first)
10870 *
10871 * - try to find the first unused crtc that can drive this connector,
10872 * and use that if we find one
10873 */
10874
10875 /* See if we already have a CRTC for this connector */
10876 if (connector->state->crtc) {
10877 crtc = connector->state->crtc;
10878
10879 ret = drm_modeset_lock(&crtc->mutex, ctx);
10880 if (ret)
10881 goto fail;
10882
10883 /* Make sure the crtc and connector are running */
10884 goto found;
10885 }
10886
10887 /* Find an unused one (if possible) */
10888 for_each_crtc(dev, possible_crtc) {
10889 i++;
10890 if (!(encoder->possible_crtcs & (1 << i)))
10891 continue;
10892
10893 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10894 if (ret)
10895 goto fail;
10896
10897 if (possible_crtc->state->enable) {
10898 drm_modeset_unlock(&possible_crtc->mutex);
10899 continue;
10900 }
10901
10902 crtc = possible_crtc;
10903 break;
10904 }
10905
10906 /*
10907 * If we didn't find an unused CRTC, don't use any.
10908 */
10909 if (!crtc) {
10910 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10911 ret = -ENODEV;
10912 goto fail;
10913 }
10914
10915 found:
10916 intel_crtc = to_intel_crtc(crtc);
10917
10918 state = drm_atomic_state_alloc(dev);
10919 restore_state = drm_atomic_state_alloc(dev);
10920 if (!state || !restore_state) {
10921 ret = -ENOMEM;
10922 goto fail;
10923 }
10924
10925 state->acquire_ctx = ctx;
10926 restore_state->acquire_ctx = ctx;
10927
10928 connector_state = drm_atomic_get_connector_state(state, connector);
10929 if (IS_ERR(connector_state)) {
10930 ret = PTR_ERR(connector_state);
10931 goto fail;
10932 }
10933
10934 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10935 if (ret)
10936 goto fail;
10937
10938 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10939 if (IS_ERR(crtc_state)) {
10940 ret = PTR_ERR(crtc_state);
10941 goto fail;
10942 }
10943
10944 crtc_state->base.active = crtc_state->base.enable = true;
10945
10946 if (!mode)
10947 mode = &load_detect_mode;
10948
10949 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10950 if (ret)
10951 goto fail;
10952
10953 ret = intel_modeset_disable_planes(state, crtc);
10954 if (ret)
10955 goto fail;
10956
10957 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10958 if (!ret)
10959 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10960 if (!ret)
10961 ret = drm_atomic_add_affected_planes(restore_state, crtc);
10962 if (ret) {
10963 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10964 goto fail;
10965 }
10966
10967 ret = drm_atomic_commit(state);
10968 if (ret) {
10969 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10970 goto fail;
10971 }
10972
10973 old->restore_state = restore_state;
10974 drm_atomic_state_put(state);
10975
10976 /* let the connector get through one full cycle before testing */
10977 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
10978 return true;
10979
10980 fail:
10981 if (state) {
10982 drm_atomic_state_put(state);
10983 state = NULL;
10984 }
10985 if (restore_state) {
10986 drm_atomic_state_put(restore_state);
10987 restore_state = NULL;
10988 }
10989
10990 if (ret == -EDEADLK)
10991 return ret;
10992
10993 return false;
10994 }
10995
10996 void intel_release_load_detect_pipe(struct drm_connector *connector,
10997 struct intel_load_detect_pipe *old,
10998 struct drm_modeset_acquire_ctx *ctx)
10999 {
11000 struct intel_encoder *intel_encoder =
11001 intel_attached_encoder(connector);
11002 struct drm_encoder *encoder = &intel_encoder->base;
11003 struct drm_atomic_state *state = old->restore_state;
11004 int ret;
11005
11006 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11007 connector->base.id, connector->name,
11008 encoder->base.id, encoder->name);
11009
11010 if (!state)
11011 return;
11012
11013 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11014 if (ret)
11015 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11016 drm_atomic_state_put(state);
11017 }
11018
11019 static int i9xx_pll_refclk(struct drm_device *dev,
11020 const struct intel_crtc_state *pipe_config)
11021 {
11022 struct drm_i915_private *dev_priv = to_i915(dev);
11023 u32 dpll = pipe_config->dpll_hw_state.dpll;
11024
11025 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11026 return dev_priv->vbt.lvds_ssc_freq;
11027 else if (HAS_PCH_SPLIT(dev_priv))
11028 return 120000;
11029 else if (!IS_GEN(dev_priv, 2))
11030 return 96000;
11031 else
11032 return 48000;
11033 }
11034
11035 /* Returns the clock of the currently programmed mode of the given pipe. */
11036 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11037 struct intel_crtc_state *pipe_config)
11038 {
11039 struct drm_device *dev = crtc->base.dev;
11040 struct drm_i915_private *dev_priv = to_i915(dev);
11041 int pipe = pipe_config->cpu_transcoder;
11042 u32 dpll = pipe_config->dpll_hw_state.dpll;
11043 u32 fp;
11044 struct dpll clock;
11045 int port_clock;
11046 int refclk = i9xx_pll_refclk(dev, pipe_config);
11047
11048 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11049 fp = pipe_config->dpll_hw_state.fp0;
11050 else
11051 fp = pipe_config->dpll_hw_state.fp1;
11052
11053 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11054 if (IS_PINEVIEW(dev_priv)) {
11055 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11056 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11057 } else {
11058 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11059 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11060 }
11061
11062 if (!IS_GEN(dev_priv, 2)) {
11063 if (IS_PINEVIEW(dev_priv))
11064 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11065 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11066 else
11067 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11068 DPLL_FPA01_P1_POST_DIV_SHIFT);
11069
11070 switch (dpll & DPLL_MODE_MASK) {
11071 case DPLLB_MODE_DAC_SERIAL:
11072 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11073 5 : 10;
11074 break;
11075 case DPLLB_MODE_LVDS:
11076 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11077 7 : 14;
11078 break;
11079 default:
11080 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11081 "mode\n", (int)(dpll & DPLL_MODE_MASK));
11082 return;
11083 }
11084
11085 if (IS_PINEVIEW(dev_priv))
11086 port_clock = pnv_calc_dpll_params(refclk, &clock);
11087 else
11088 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11089 } else {
11090 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11091 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11092
11093 if (is_lvds) {
11094 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11095 DPLL_FPA01_P1_POST_DIV_SHIFT);
11096
11097 if (lvds & LVDS_CLKB_POWER_UP)
11098 clock.p2 = 7;
11099 else
11100 clock.p2 = 14;
11101 } else {
11102 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11103 clock.p1 = 2;
11104 else {
11105 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11106 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11107 }
11108 if (dpll & PLL_P2_DIVIDE_BY_4)
11109 clock.p2 = 4;
11110 else
11111 clock.p2 = 2;
11112 }
11113
11114 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11115 }
11116
11117 /*
11118 * This value includes pixel_multiplier. We will use
11119 * port_clock to compute adjusted_mode.crtc_clock in the
11120 * encoder's get_config() function.
11121 */
11122 pipe_config->port_clock = port_clock;
11123 }
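
/*
 * A rough worked sketch of the divisor maths read back above for the
 * classic (non-Pineview) path, assuming the usual i9xx relations
 * M = 5 * (m1 + 2) + (m2 + 2) and effective N = n + 2; the exact
 * rounding, and the Pineview variant, live in i9xx_calc_dpll_params()
 * and pnv_calc_dpll_params().
 */
static int i9xx_port_clock_sketch(int refclk, int m1, int m2, int n,
				  int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);
	int vco = refclk * m / (n + 2);		/* kHz, rounding ignored */

	return vco / (p1 * p2);			/* port (dot) clock in kHz */
}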
11124
11125 int intel_dotclock_calculate(int link_freq,
11126 const struct intel_link_m_n *m_n)
11127 {
11128 /*
11129 * The calculation for the data clock is:
11130 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11131 * But we want to avoid losing precision if possible, so:
11132 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11133 *
11134 * and the dot clock computed by this helper is simpler:
11135 * dot_clock = (link_m * link_clock) / link_n
11136 */
11137
11138 if (!m_n->link_n)
11139 return 0;
11140
11141 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11142 }
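
/*
 * Worked example for the division above, with hypothetical values: a
 * link clock of 270000 kHz and link_m/link_n = 22/54 gives a pixel
 * clock of 22 * 270000 / 54 = 110000 kHz. The 64-bit multiply in the
 * real code only guards against link_m * link_freq overflowing 32 bits.
 */
static int dotclock_example(void)
{
	const struct intel_link_m_n example = { .link_m = 22, .link_n = 54 };

	return intel_dotclock_calculate(270000, &example);	/* 110000 */
}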
11143
11144 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11145 struct intel_crtc_state *pipe_config)
11146 {
11147 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11148
11149 /* read out port_clock from the DPLL */
11150 i9xx_crtc_clock_get(crtc, pipe_config);
11151
11152 /*
11153 * In case there is an active pipe without active ports,
11154 * we may need some idea for the dotclock anyway.
11155 * Calculate one based on the FDI configuration.
11156 */
11157 pipe_config->base.adjusted_mode.crtc_clock =
11158 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11159 &pipe_config->fdi_m_n);
11160 }
11161
11162 /* Returns the currently programmed mode of the given encoder. */
11163 struct drm_display_mode *
11164 intel_encoder_current_mode(struct intel_encoder *encoder)
11165 {
11166 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11167 struct intel_crtc_state *crtc_state;
11168 struct drm_display_mode *mode;
11169 struct intel_crtc *crtc;
11170 enum pipe pipe;
11171
11172 if (!encoder->get_hw_state(encoder, &pipe))
11173 return NULL;
11174
11175 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11176
11177 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11178 if (!mode)
11179 return NULL;
11180
11181 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11182 if (!crtc_state) {
11183 kfree(mode);
11184 return NULL;
11185 }
11186
11187 crtc_state->base.crtc = &crtc->base;
11188
11189 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11190 kfree(crtc_state);
11191 kfree(mode);
11192 return NULL;
11193 }
11194
11195 encoder->get_config(encoder, crtc_state);
11196
11197 intel_mode_from_pipe_config(mode, crtc_state);
11198
11199 kfree(crtc_state);
11200
11201 return mode;
11202 }
11203
11204 static void intel_crtc_destroy(struct drm_crtc *crtc)
11205 {
11206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11207
11208 drm_crtc_cleanup(crtc);
11209 kfree(intel_crtc);
11210 }
11211
11212 /**
11213 * intel_wm_need_update - Check whether watermarks need updating
11214 * @cur: current plane state
11215 * @new: new plane state
11216 *
11217 * Check current plane state versus the new one to determine whether
11218 * watermarks need to be recalculated.
11219 *
11220 * Returns true if the watermarks need to be recalculated, false otherwise.
11221 */
11222 static bool intel_wm_need_update(struct intel_plane_state *cur,
11223 struct intel_plane_state *new)
11224 {
11225 /* Update watermarks on tiling or size changes. */
11226 if (new->base.visible != cur->base.visible)
11227 return true;
11228
11229 if (!cur->base.fb || !new->base.fb)
11230 return false;
11231
11232 if (cur->base.fb->modifier != new->base.fb->modifier ||
11233 cur->base.rotation != new->base.rotation ||
11234 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11235 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11236 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11237 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11238 return true;
11239
11240 return false;
11241 }
11242
11243 static bool needs_scaling(const struct intel_plane_state *state)
11244 {
11245 int src_w = drm_rect_width(&state->base.src) >> 16;
11246 int src_h = drm_rect_height(&state->base.src) >> 16;
11247 int dst_w = drm_rect_width(&state->base.dst);
11248 int dst_h = drm_rect_height(&state->base.dst);
11249
11250 return (src_w != dst_w || src_h != dst_h);
11251 }
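
/*
 * Note: plane source coordinates are stored in 16.16 fixed point,
 * hence the >> 16 above. E.g. a source width of 1920 pixels is stored
 * as 1920 << 16 = 0x7800000, and needs_scaling() compares it (shifted
 * back down to integer pixels) against the integer destination width.
 */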
11252
11253 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11254 struct drm_crtc_state *crtc_state,
11255 const struct intel_plane_state *old_plane_state,
11256 struct drm_plane_state *plane_state)
11257 {
11258 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11259 struct drm_crtc *crtc = crtc_state->crtc;
11260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11261 struct intel_plane *plane = to_intel_plane(plane_state->plane);
11262 struct drm_device *dev = crtc->dev;
11263 struct drm_i915_private *dev_priv = to_i915(dev);
11264 bool mode_changed = needs_modeset(crtc_state);
11265 bool was_crtc_enabled = old_crtc_state->base.active;
11266 bool is_crtc_enabled = crtc_state->active;
11267 bool turn_off, turn_on, visible, was_visible;
11268 struct drm_framebuffer *fb = plane_state->fb;
11269 int ret;
11270
11271 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11272 ret = skl_update_scaler_plane(
11273 to_intel_crtc_state(crtc_state),
11274 to_intel_plane_state(plane_state));
11275 if (ret)
11276 return ret;
11277 }
11278
11279 was_visible = old_plane_state->base.visible;
11280 visible = plane_state->visible;
11281
11282 if (!was_crtc_enabled && WARN_ON(was_visible))
11283 was_visible = false;
11284
11285 /*
11286 * Visibility is calculated as if the crtc was on, but
11287 * after scaler setup everything depends on it being off
11288 * when the crtc isn't active.
11289 *
11290 * FIXME this is wrong for watermarks. Watermarks should also
11291 * be computed as if the pipe were active. Perhaps move
11292 * per-plane wm computation to the .check_plane() hook, and
11293 * only combine the results from all planes in the current place?
11294 */
11295 if (!is_crtc_enabled) {
11296 plane_state->visible = visible = false;
11297 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
11298 to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
11299 }
11300
11301 if (!was_visible && !visible)
11302 return 0;
11303
11304 if (fb != old_plane_state->base.fb)
11305 pipe_config->fb_changed = true;
11306
11307 turn_off = was_visible && (!visible || mode_changed);
11308 turn_on = visible && (!was_visible || mode_changed);
11309
11310 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11311 intel_crtc->base.base.id, intel_crtc->base.name,
11312 plane->base.base.id, plane->base.name,
11313 fb ? fb->base.id : -1);
11314
11315 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11316 plane->base.base.id, plane->base.name,
11317 was_visible, visible,
11318 turn_off, turn_on, mode_changed);
11319
11320 if (turn_on) {
11321 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11322 pipe_config->update_wm_pre = true;
11323
11324 /* must disable cxsr around plane enable/disable */
11325 if (plane->id != PLANE_CURSOR)
11326 pipe_config->disable_cxsr = true;
11327 } else if (turn_off) {
11328 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11329 pipe_config->update_wm_post = true;
11330
11331 /* must disable cxsr around plane enable/disable */
11332 if (plane->id != PLANE_CURSOR)
11333 pipe_config->disable_cxsr = true;
11334 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11335 to_intel_plane_state(plane_state))) {
11336 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11337 /* FIXME bollocks */
11338 pipe_config->update_wm_pre = true;
11339 pipe_config->update_wm_post = true;
11340 }
11341 }
11342
11343 if (visible || was_visible)
11344 pipe_config->fb_bits |= plane->frontbuffer_bit;
11345
11346 /*
11347 * ILK/SNB DVSACNTR/Sprite Enable
11348 * IVB SPR_CTL/Sprite Enable
11349 * "When in Self Refresh Big FIFO mode, a write to enable the
11350 * plane will be internally buffered and delayed while Big FIFO
11351 * mode is exiting."
11352 *
11353 * Which means that enabling the sprite can take an extra frame
11354 * when we start in big FIFO mode (LP1+). Thus we need to drop
11355 * down to LP0 and wait for vblank in order to make sure the
11356 * sprite gets enabled on the next vblank after the register write.
11357 * Doing otherwise would risk enabling the sprite one frame after
11358 * we've already signalled flip completion. We can resume LP1+
11359 * once the sprite has been enabled.
11360 *
11361 *
11362 * WaCxSRDisabledForSpriteScaling:ivb
11363 * IVB SPR_SCALE/Scaling Enable
11364 * "Low Power watermarks must be disabled for at least one
11365 * frame before enabling sprite scaling, and kept disabled
11366 * until sprite scaling is disabled."
11367 *
11368 * ILK/SNB DVSASCALE/Scaling Enable
11369 * "When in Self Refresh Big FIFO mode, scaling enable will be
11370 * masked off while Big FIFO mode is exiting."
11371 *
11372 * Despite the w/a only being listed for IVB we assume that
11373 * the ILK/SNB note has similar ramifications, hence we apply
11374 * the w/a on all three platforms.
11375 *
11376 * Experimental results suggest this is also needed for the
11377 * primary plane, not only the sprite plane.
11378 */
11379 if (plane->id != PLANE_CURSOR &&
11380 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11381 IS_IVYBRIDGE(dev_priv)) &&
11382 (turn_on || (!needs_scaling(old_plane_state) &&
11383 needs_scaling(to_intel_plane_state(plane_state)))))
11384 pipe_config->disable_lp_wm = true;
11385
11386 return 0;
11387 }
11388
11389 static bool encoders_cloneable(const struct intel_encoder *a,
11390 const struct intel_encoder *b)
11391 {
11392 /* masks could be asymmetric, so check both ways */
11393 return a == b || (a->cloneable & (1 << b->type) &&
11394 b->cloneable & (1 << a->type));
11395 }
11396
11397 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11398 struct intel_crtc *crtc,
11399 struct intel_encoder *encoder)
11400 {
11401 struct intel_encoder *source_encoder;
11402 struct drm_connector *connector;
11403 struct drm_connector_state *connector_state;
11404 int i;
11405
11406 for_each_new_connector_in_state(state, connector, connector_state, i) {
11407 if (connector_state->crtc != &crtc->base)
11408 continue;
11409
11410 source_encoder =
11411 to_intel_encoder(connector_state->best_encoder);
11412 if (!encoders_cloneable(encoder, source_encoder))
11413 return false;
11414 }
11415
11416 return true;
11417 }
11418
11419 static int icl_add_linked_planes(struct intel_atomic_state *state)
11420 {
11421 struct intel_plane *plane, *linked;
11422 struct intel_plane_state *plane_state, *linked_plane_state;
11423 int i;
11424
11425 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11426 linked = plane_state->linked_plane;
11427
11428 if (!linked)
11429 continue;
11430
11431 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11432 if (IS_ERR(linked_plane_state))
11433 return PTR_ERR(linked_plane_state);
11434
11435 WARN_ON(linked_plane_state->linked_plane != plane);
11436 WARN_ON(linked_plane_state->slave == plane_state->slave);
11437 }
11438
11439 return 0;
11440 }
11441
11442 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11443 {
11444 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11445 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11446 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11447 struct intel_plane *plane, *linked;
11448 struct intel_plane_state *plane_state;
11449 int i;
11450
11451 if (INTEL_GEN(dev_priv) < 11)
11452 return 0;
11453
11454 /*
11455 * Destroy all old plane links and make the slave plane invisible
11456 * in the crtc_state->active_planes mask.
11457 */
11458 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11459 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11460 continue;
11461
11462 plane_state->linked_plane = NULL;
11463 if (plane_state->slave && !plane_state->base.visible) {
11464 crtc_state->active_planes &= ~BIT(plane->id);
11465 crtc_state->update_planes |= BIT(plane->id);
11466 }
11467
11468 plane_state->slave = false;
11469 }
11470
11471 if (!crtc_state->nv12_planes)
11472 return 0;
11473
11474 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11475 struct intel_plane_state *linked_state = NULL;
11476
11477 if (plane->pipe != crtc->pipe ||
11478 !(crtc_state->nv12_planes & BIT(plane->id)))
11479 continue;
11480
11481 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11482 if (!icl_is_nv12_y_plane(linked->id))
11483 continue;
11484
11485 if (crtc_state->active_planes & BIT(linked->id))
11486 continue;
11487
11488 linked_state = intel_atomic_get_plane_state(state, linked);
11489 if (IS_ERR(linked_state))
11490 return PTR_ERR(linked_state);
11491
11492 break;
11493 }
11494
11495 if (!linked_state) {
11496 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11497 hweight8(crtc_state->nv12_planes));
11498
11499 return -EINVAL;
11500 }
11501
11502 plane_state->linked_plane = linked;
11503
11504 linked_state->slave = true;
11505 linked_state->linked_plane = plane;
11506 crtc_state->active_planes |= BIT(linked->id);
11507 crtc_state->update_planes |= BIT(linked->id);
11508 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11509 }
11510
11511 return 0;
11512 }
11513
11514 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11515 {
11516 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11517 struct intel_atomic_state *state =
11518 to_intel_atomic_state(new_crtc_state->base.state);
11519 const struct intel_crtc_state *old_crtc_state =
11520 intel_atomic_get_old_crtc_state(state, crtc);
11521
11522 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11523 }
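
/*
 * The !old != !new idiom above compares only the "any C8 planes at
 * all?" boolean-ness of the two bitmasks, e.g. (with hypothetical
 * masks) 0x0 -> 0x6 is a change, while 0x2 -> 0x4 is not:
 */
static bool c8_boolness_changed_sketch(u8 old_mask, u8 new_mask)
{
	return !old_mask != !new_mask;
}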
11524
11525 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11526 struct drm_crtc_state *crtc_state)
11527 {
11528 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11529 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11530 struct intel_crtc_state *pipe_config =
11531 to_intel_crtc_state(crtc_state);
11532 int ret;
11533 bool mode_changed = needs_modeset(crtc_state);
11534
11535 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11536 mode_changed && !crtc_state->active)
11537 pipe_config->update_wm_post = true;
11538
11539 if (mode_changed && crtc_state->enable &&
11540 dev_priv->display.crtc_compute_clock &&
11541 !WARN_ON(pipe_config->shared_dpll)) {
11542 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11543 pipe_config);
11544 if (ret)
11545 return ret;
11546 }
11547
11548 /*
11549 * May need to update pipe gamma enable bits
11550 * when C8 planes are getting enabled/disabled.
11551 */
11552 if (c8_planes_changed(pipe_config))
11553 crtc_state->color_mgmt_changed = true;
11554
11555 if (mode_changed || pipe_config->update_pipe ||
11556 crtc_state->color_mgmt_changed) {
11557 ret = intel_color_check(pipe_config);
11558 if (ret)
11559 return ret;
11560 }
11561
11562 ret = 0;
11563 if (dev_priv->display.compute_pipe_wm) {
11564 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11565 if (ret) {
11566 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11567 return ret;
11568 }
11569 }
11570
11571 if (dev_priv->display.compute_intermediate_wm) {
11572 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11573 return 0;
11574
11575 /*
11576 * Calculate 'intermediate' watermarks that satisfy both the
11577 * old state and the new state. We can program these
11578 * immediately.
11579 */
11580 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11581 if (ret) {
11582 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11583 return ret;
11584 }
11585 }
11586
11587 if (INTEL_GEN(dev_priv) >= 9) {
11588 if (mode_changed || pipe_config->update_pipe)
11589 ret = skl_update_scaler_crtc(pipe_config);
11590
11591 if (!ret)
11592 ret = icl_check_nv12_planes(pipe_config);
11593 if (!ret)
11594 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11595 pipe_config);
11596 if (!ret)
11597 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11598 pipe_config);
11599 }
11600
11601 if (HAS_IPS(dev_priv))
11602 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11603
11604 return ret;
11605 }
11606
11607 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11608 .atomic_check = intel_crtc_atomic_check,
11609 };
11610
11611 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11612 {
11613 struct intel_connector *connector;
11614 struct drm_connector_list_iter conn_iter;
11615
11616 drm_connector_list_iter_begin(dev, &conn_iter);
11617 for_each_intel_connector_iter(connector, &conn_iter) {
11618 if (connector->base.state->crtc)
11619 drm_connector_put(&connector->base);
11620
11621 if (connector->base.encoder) {
11622 connector->base.state->best_encoder =
11623 connector->base.encoder;
11624 connector->base.state->crtc =
11625 connector->base.encoder->crtc;
11626
11627 drm_connector_get(&connector->base);
11628 } else {
11629 connector->base.state->best_encoder = NULL;
11630 connector->base.state->crtc = NULL;
11631 }
11632 }
11633 drm_connector_list_iter_end(&conn_iter);
11634 }
11635
11636 static int
11637 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11638 struct intel_crtc_state *pipe_config)
11639 {
11640 struct drm_connector *connector = conn_state->connector;
11641 const struct drm_display_info *info = &connector->display_info;
11642 int bpp;
11643
11644 switch (conn_state->max_bpc) {
11645 case 6 ... 7:
11646 bpp = 6 * 3;
11647 break;
11648 case 8 ... 9:
11649 bpp = 8 * 3;
11650 break;
11651 case 10 ... 11:
11652 bpp = 10 * 3;
11653 break;
11654 case 12:
11655 bpp = 12 * 3;
11656 break;
11657 default:
11658 return -EINVAL;
11659 }
11660
11661 if (bpp < pipe_config->pipe_bpp) {
11662 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11663 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11664 connector->base.id, connector->name,
11665 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11666 pipe_config->pipe_bpp);
11667
11668 pipe_config->pipe_bpp = bpp;
11669 }
11670
11671 return 0;
11672 }
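
/*
 * E.g. a connector "max bpc" property of 10 maps to 10 * 3 = 30 bpp
 * (three colour channels), and the pipe bpp is then clamped to the
 * minimum of that and the platform baseline computed below.
 */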
11673
11674 static int
11675 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11676 struct intel_crtc_state *pipe_config)
11677 {
11678 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11679 struct drm_atomic_state *state = pipe_config->base.state;
11680 struct drm_connector *connector;
11681 struct drm_connector_state *connector_state;
11682 int bpp, i;
11683
11684 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11685 IS_CHERRYVIEW(dev_priv)))
11686 bpp = 10*3;
11687 else if (INTEL_GEN(dev_priv) >= 5)
11688 bpp = 12*3;
11689 else
11690 bpp = 8*3;
11691
11692 pipe_config->pipe_bpp = bpp;
11693
11694 /* Clamp display bpp to connector max bpp */
11695 for_each_new_connector_in_state(state, connector, connector_state, i) {
11696 int ret;
11697
11698 if (connector_state->crtc != &crtc->base)
11699 continue;
11700
11701 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11702 if (ret)
11703 return ret;
11704 }
11705
11706 return 0;
11707 }
11708
11709 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11710 {
11711 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11712 "type: 0x%x flags: 0x%x\n",
11713 mode->crtc_clock,
11714 mode->crtc_hdisplay, mode->crtc_hsync_start,
11715 mode->crtc_hsync_end, mode->crtc_htotal,
11716 mode->crtc_vdisplay, mode->crtc_vsync_start,
11717 mode->crtc_vsync_end, mode->crtc_vtotal,
11718 mode->type, mode->flags);
11719 }
11720
11721 static inline void
11722 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
11723 const char *id, unsigned int lane_count,
11724 const struct intel_link_m_n *m_n)
11725 {
11726 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11727 id, lane_count,
11728 m_n->gmch_m, m_n->gmch_n,
11729 m_n->link_m, m_n->link_n, m_n->tu);
11730 }
11731
11732 static void
11733 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11734 const union hdmi_infoframe *frame)
11735 {
11736 if ((drm_debug & DRM_UT_KMS) == 0)
11737 return;
11738
11739 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11740 }
11741
11742 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11743
11744 static const char * const output_type_str[] = {
11745 OUTPUT_TYPE(UNUSED),
11746 OUTPUT_TYPE(ANALOG),
11747 OUTPUT_TYPE(DVO),
11748 OUTPUT_TYPE(SDVO),
11749 OUTPUT_TYPE(LVDS),
11750 OUTPUT_TYPE(TVOUT),
11751 OUTPUT_TYPE(HDMI),
11752 OUTPUT_TYPE(DP),
11753 OUTPUT_TYPE(EDP),
11754 OUTPUT_TYPE(DSI),
11755 OUTPUT_TYPE(DDI),
11756 OUTPUT_TYPE(DP_MST),
11757 };
11758
11759 #undef OUTPUT_TYPE
11760
11761 static void snprintf_output_types(char *buf, size_t len,
11762 unsigned int output_types)
11763 {
11764 char *str = buf;
11765 int i;
11766
11767 str[0] = '\0';
11768
11769 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11770 int r;
11771
11772 if ((output_types & BIT(i)) == 0)
11773 continue;
11774
11775 r = snprintf(str, len, "%s%s",
11776 str != buf ? "," : "", output_type_str[i]);
11777 if (r >= len)
11778 break;
11779 str += r;
11780 len -= r;
11781
11782 output_types &= ~BIT(i);
11783 }
11784
11785 WARN_ON_ONCE(output_types != 0);
11786 }
11787
11788 static const char * const output_format_str[] = {
11789 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11790 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11791 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11792 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11793 };
11794
11795 static const char *output_formats(enum intel_output_format format)
11796 {
11797 if (format >= ARRAY_SIZE(output_format_str))
11798 format = INTEL_OUTPUT_FORMAT_INVALID;
11799 return output_format_str[format];
11800 }
11801
11802 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
11803 {
11804 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11805 const struct drm_framebuffer *fb = plane_state->base.fb;
11806 struct drm_format_name_buf format_name;
11807
11808 if (!fb) {
11809 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
11810 plane->base.base.id, plane->base.name,
11811 yesno(plane_state->base.visible));
11812 return;
11813 }
11814
11815 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
11816 plane->base.base.id, plane->base.name,
11817 fb->base.id, fb->width, fb->height,
11818 drm_get_format_name(fb->format->format, &format_name),
11819 yesno(plane_state->base.visible));
11820 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
11821 plane_state->base.rotation, plane_state->scaler_id);
11822 if (plane_state->base.visible)
11823 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
11824 DRM_RECT_FP_ARG(&plane_state->base.src),
11825 DRM_RECT_ARG(&plane_state->base.dst));
11826 }
11827
11828 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
11829 struct intel_atomic_state *state,
11830 const char *context)
11831 {
11832 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
11833 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11834 const struct intel_plane_state *plane_state;
11835 struct intel_plane *plane;
11836 char buf[64];
11837 int i;
11838
11839 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
11840 crtc->base.base.id, crtc->base.name,
11841 yesno(pipe_config->base.enable), context);
11842
11843 if (!pipe_config->base.enable)
11844 goto dump_planes;
11845
11846 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11847 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
11848 yesno(pipe_config->base.active),
11849 buf, pipe_config->output_types,
11850 output_formats(pipe_config->output_format));
11851
11852 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11853 transcoder_name(pipe_config->cpu_transcoder),
11854 pipe_config->pipe_bpp, pipe_config->dither);
11855
11856 if (pipe_config->has_pch_encoder)
11857 intel_dump_m_n_config(pipe_config, "fdi",
11858 pipe_config->fdi_lanes,
11859 &pipe_config->fdi_m_n);
11860
11861 if (intel_crtc_has_dp_encoder(pipe_config)) {
11862 intel_dump_m_n_config(pipe_config, "dp m_n",
11863 pipe_config->lane_count, &pipe_config->dp_m_n);
11864 if (pipe_config->has_drrs)
11865 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11866 pipe_config->lane_count,
11867 &pipe_config->dp_m2_n2);
11868 }
11869
11870 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
11871 pipe_config->has_audio, pipe_config->has_infoframe,
11872 pipe_config->infoframes.enable);
11873
11874 if (pipe_config->infoframes.enable &
11875 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11876 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11877 if (pipe_config->infoframes.enable &
11878 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11879 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11880 if (pipe_config->infoframes.enable &
11881 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11882 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11883 if (pipe_config->infoframes.enable &
11884 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11885 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11886
11887 DRM_DEBUG_KMS("requested mode:\n");
11888 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11889 DRM_DEBUG_KMS("adjusted mode:\n");
11890 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11891 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11892 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11893 pipe_config->port_clock,
11894 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11895 pipe_config->pixel_rate);
11896
11897 if (INTEL_GEN(dev_priv) >= 9)
11898 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11899 crtc->num_scalers,
11900 pipe_config->scaler_state.scaler_users,
11901 pipe_config->scaler_state.scaler_id);
11902
11903 if (HAS_GMCH(dev_priv))
11904 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11905 pipe_config->gmch_pfit.control,
11906 pipe_config->gmch_pfit.pgm_ratios,
11907 pipe_config->gmch_pfit.lvds_border_bits);
11908 else
11909 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
11910 pipe_config->pch_pfit.pos,
11911 pipe_config->pch_pfit.size,
11912 enableddisabled(pipe_config->pch_pfit.enabled),
11913 yesno(pipe_config->pch_pfit.force_thru));
11914
11915 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11916 pipe_config->ips_enabled, pipe_config->double_wide);
11917
11918 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11919
11920 dump_planes:
11921 if (!state)
11922 return;
11923
11924 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11925 if (plane->pipe == crtc->pipe)
11926 intel_dump_plane_state(plane_state);
11927 }
11928 }
11929
11930 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
11931 {
11932 struct drm_device *dev = state->base.dev;
11933 struct drm_connector *connector;
11934 struct drm_connector_list_iter conn_iter;
11935 unsigned int used_ports = 0;
11936 unsigned int used_mst_ports = 0;
11937 bool ret = true;
11938
11939 /*
11940 * Walk the connector list instead of the encoder
11941 * list to detect the problem on ddi platforms
11942 * where there's just one encoder per digital port.
11943 */
11944 drm_connector_list_iter_begin(dev, &conn_iter);
11945 drm_for_each_connector_iter(connector, &conn_iter) {
11946 struct drm_connector_state *connector_state;
11947 struct intel_encoder *encoder;
11948
11949 connector_state =
11950 drm_atomic_get_new_connector_state(&state->base,
11951 connector);
11952 if (!connector_state)
11953 connector_state = connector->state;
11954
11955 if (!connector_state->best_encoder)
11956 continue;
11957
11958 encoder = to_intel_encoder(connector_state->best_encoder);
11959
11960 WARN_ON(!connector_state->crtc);
11961
11962 switch (encoder->type) {
11963 unsigned int port_mask;
11964 case INTEL_OUTPUT_DDI:
11965 if (WARN_ON(!HAS_DDI(to_i915(dev))))
11966 break;
11967 /* else: fall through */
11968 case INTEL_OUTPUT_DP:
11969 case INTEL_OUTPUT_HDMI:
11970 case INTEL_OUTPUT_EDP:
11971 port_mask = 1 << encoder->port;
11972
11973 /* the same port mustn't appear more than once */
11974 if (used_ports & port_mask)
11975 ret = false;
11976
11977 used_ports |= port_mask;
11978 break;
11979 case INTEL_OUTPUT_DP_MST:
11980 used_mst_ports |=
11981 1 << encoder->port;
11982 break;
11983 default:
11984 break;
11985 }
11986 }
11987 drm_connector_list_iter_end(&conn_iter);
11988
11989 /* can't mix MST and SST/HDMI on the same port */
11990 if (used_ports & used_mst_ports)
11991 return false;
11992
11993 return ret;
11994 }
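
/*
 * A condensed sketch of the bitmask bookkeeping above, over
 * hypothetical (port, is_mst) pairs: an SST/HDMI port may not be
 * claimed twice, and MST may never share a port with SST/HDMI.
 */
static bool ports_conflict_sketch(const int *port, const bool *is_mst, int n)
{
	unsigned int used = 0, used_mst = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int mask = 1u << port[i];

		if (is_mst[i]) {
			used_mst |= mask;
		} else {
			if (used & mask)	/* same port twice */
				return true;
			used |= mask;
		}
	}

	return used & used_mst;		/* MST mixed with SST/HDMI */
}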
11995
11996 static int
11997 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11998 {
11999 struct drm_i915_private *dev_priv =
12000 to_i915(crtc_state->base.crtc->dev);
12001 struct intel_crtc_state *saved_state;
12002
12003 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12004 if (!saved_state)
12005 return -ENOMEM;
12006
12007 /* FIXME: before the switch to atomic started, a new pipe_config was
12008 * kzalloc'd. Code that depends on any field being zero should be
12009 * fixed, so that the crtc_state can be safely duplicated. For now,
12010 * only fields that are known not to cause problems are preserved. */
12011
12012 saved_state->scaler_state = crtc_state->scaler_state;
12013 saved_state->shared_dpll = crtc_state->shared_dpll;
12014 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12015 saved_state->crc_enabled = crtc_state->crc_enabled;
12016 if (IS_G4X(dev_priv) ||
12017 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12018 saved_state->wm = crtc_state->wm;
12019
12020 /* Keep base drm_crtc_state intact, only clear our extended struct */
12021 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
12022 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
12023 sizeof(*crtc_state) - sizeof(crtc_state->base));
12024
12025 kfree(saved_state);
12026 return 0;
12027 }
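
/*
 * The memcpy above relies on 'base' being the first member of struct
 * intel_crtc_state (the BUILD_BUG_ON checks that its offset is 0), so
 * &crtc_state->base + 1 points at the first byte after the base
 * struct. A generic sketch of the same idiom with hypothetical types:
 */
struct wrapped_state_sketch {
	struct drm_crtc_state base;	/* preserved */
	int ext_a, ext_b;		/* extended state to be overwritten */
};

static void reset_extended_sketch(struct wrapped_state_sketch *s,
				  const struct wrapped_state_sketch *saved)
{
	memcpy(&s->base + 1, &saved->base + 1,
	       sizeof(*s) - sizeof(s->base));
}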
12028
12029 static int
12030 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12031 {
12032 struct drm_crtc *crtc = pipe_config->base.crtc;
12033 struct drm_atomic_state *state = pipe_config->base.state;
12034 struct intel_encoder *encoder;
12035 struct drm_connector *connector;
12036 struct drm_connector_state *connector_state;
12037 int base_bpp, ret;
12038 int i;
12039 bool retry = true;
12040
12041 ret = clear_intel_crtc_state(pipe_config);
12042 if (ret)
12043 return ret;
12044
12045 pipe_config->cpu_transcoder =
12046 (enum transcoder) to_intel_crtc(crtc)->pipe;
12047
12048 /*
12049 * Sanitize sync polarity flags based on requested ones. If neither
12050 * positive nor negative polarity is requested, treat this as meaning
12051 * negative polarity.
12052 */
12053 if (!(pipe_config->base.adjusted_mode.flags &
12054 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12055 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12056
12057 if (!(pipe_config->base.adjusted_mode.flags &
12058 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12059 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12060
12061 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12062 pipe_config);
12063 if (ret)
12064 return ret;
12065
12066 base_bpp = pipe_config->pipe_bpp;
12067
12068 /*
12069 * Determine the real pipe dimensions. Note that stereo modes can
12070 * increase the actual pipe size due to the frame doubling and
12071 * insertion of additional space for blanks between the frames. This
12072 * is stored in the crtc timings. We use the requested mode to do this
12073 * computation to clearly distinguish it from the adjusted mode, which
12074 * can be changed by the connectors in the below retry loop.
12075 */
12076 drm_mode_get_hv_timing(&pipe_config->base.mode,
12077 &pipe_config->pipe_src_w,
12078 &pipe_config->pipe_src_h);
12079
12080 for_each_new_connector_in_state(state, connector, connector_state, i) {
12081 if (connector_state->crtc != crtc)
12082 continue;
12083
12084 encoder = to_intel_encoder(connector_state->best_encoder);
12085
12086 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12087 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12088 return -EINVAL;
12089 }
12090
12091 /*
12092 * Determine output_types before calling the .compute_config()
12093 * hooks so that the hooks can use this information safely.
12094 */
12095 if (encoder->compute_output_type)
12096 pipe_config->output_types |=
12097 BIT(encoder->compute_output_type(encoder, pipe_config,
12098 connector_state));
12099 else
12100 pipe_config->output_types |= BIT(encoder->type);
12101 }
12102
12103 encoder_retry:
12104 /* Ensure the port clock defaults are reset when retrying. */
12105 pipe_config->port_clock = 0;
12106 pipe_config->pixel_multiplier = 1;
12107
12108 /* Fill in default crtc timings, allow encoders to overwrite them. */
12109 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12110 CRTC_STEREO_DOUBLE);
12111
12112 /* Pass our mode to the connectors and the CRTC to give them a chance to
12113 * adjust it according to limitations or connector properties, and also
12114 * a chance to reject the mode entirely.
12115 */
12116 for_each_new_connector_in_state(state, connector, connector_state, i) {
12117 if (connector_state->crtc != crtc)
12118 continue;
12119
12120 encoder = to_intel_encoder(connector_state->best_encoder);
12121 ret = encoder->compute_config(encoder, pipe_config,
12122 connector_state);
12123 if (ret < 0) {
12124 if (ret != -EDEADLK)
12125 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12126 ret);
12127 return ret;
12128 }
12129 }
12130
12131 /* Set default port clock if not overwritten by the encoder. Needs to be
12132 * done afterwards in case the encoder adjusts the mode. */
12133 if (!pipe_config->port_clock)
12134 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12135 * pipe_config->pixel_multiplier;
12136
12137 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12138 if (ret == -EDEADLK)
12139 return ret;
12140 if (ret < 0) {
12141 DRM_DEBUG_KMS("CRTC fixup failed\n");
12142 return ret;
12143 }
12144
12145 if (ret == RETRY) {
12146 if (WARN(!retry, "loop in pipe configuration computation\n"))
12147 return -EINVAL;
12148
12149 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12150 retry = false;
12151 goto encoder_retry;
12152 }
12153
12154 /* Dithering seems to not pass bits through correctly when it should, so
12155 * only enable it on 6bpc panels and when it's not a compliance
12156 * test requesting a 6bpc video pattern.
12157 */
12158 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12159 !pipe_config->dither_force_disable;
12160 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12161 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12162
12163 return 0;
12164 }
12165
12166 bool intel_fuzzy_clock_check(int clock1, int clock2)
12167 {
12168 int diff;
12169
12170 if (clock1 == clock2)
12171 return true;
12172
12173 if (!clock1 || !clock2)
12174 return false;
12175
12176 diff = abs(clock1 - clock2);
12177
12178 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12179 return true;
12180
12181 return false;
12182 }
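
/*
 * Worked example of the tolerance above: the check passes while
 * diff * 100 < 5 * (clock1 + clock2), i.e. roughly a 10% relative
 * difference. With hypothetical clocks, 100000 vs 105000 kHz gives
 * (5000 + 205000) * 100 / 205000 = 102 < 105 (match), while
 * 100000 vs 111000 kHz gives 105 (no match).
 */
static bool fuzzy_clock_example(void)
{
	return intel_fuzzy_clock_check(100000, 105000);	/* true */
}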
12183
12184 static bool
12185 intel_compare_m_n(unsigned int m, unsigned int n,
12186 unsigned int m2, unsigned int n2,
12187 bool exact)
12188 {
12189 if (m == m2 && n == n2)
12190 return true;
12191
12192 if (exact || !m || !n || !m2 || !n2)
12193 return false;
12194
12195 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12196
12197 if (n > n2) {
12198 while (n > n2) {
12199 m2 <<= 1;
12200 n2 <<= 1;
12201 }
12202 } else if (n < n2) {
12203 while (n < n2) {
12204 m <<= 1;
12205 n <<= 1;
12206 }
12207 }
12208
12209 if (n != n2)
12210 return false;
12211
12212 return intel_fuzzy_clock_check(m, m2);
12213 }
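
/*
 * Worked example of the normalisation above: comparing 10/16 against
 * 5/8 in fuzzy mode, the 5/8 pair is doubled to 10/16 so the n values
 * match exactly, and 10 vs 10 then trivially passes the fuzzy clock
 * check, so the two ratios are treated as equal.
 */
static bool compare_m_n_example(void)
{
	return intel_compare_m_n(10, 16, 5, 8, false);	/* true */
}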
12214
12215 static bool
12216 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12217 struct intel_link_m_n *m2_n2,
12218 bool adjust)
12219 {
12220 if (m_n->tu == m2_n2->tu &&
12221 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12222 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12223 intel_compare_m_n(m_n->link_m, m_n->link_n,
12224 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12225 if (adjust)
12226 *m2_n2 = *m_n;
12227
12228 return true;
12229 }
12230
12231 return false;
12232 }
12233
12234 static bool
12235 intel_compare_infoframe(const union hdmi_infoframe *a,
12236 const union hdmi_infoframe *b)
12237 {
12238 return memcmp(a, b, sizeof(*a)) == 0;
12239 }
12240
12241 static void
12242 pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
12243 bool adjust, const char *name,
12244 const union hdmi_infoframe *a,
12245 const union hdmi_infoframe *b)
12246 {
12247 if (adjust) {
12248 if ((drm_debug & DRM_UT_KMS) == 0)
12249 return;
12250
12251 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
12252 drm_dbg(DRM_UT_KMS, "expected:");
12253 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12254 drm_dbg(DRM_UT_KMS, "found");
12255 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12256 } else {
12257 drm_err("mismatch in %s infoframe", name);
12258 drm_err("expected:");
12259 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12260 drm_err("found");
12261 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12262 }
12263 }
12264
12265 static void __printf(3, 4)
12266 pipe_config_err(bool adjust, const char *name, const char *format, ...)
12267 {
12268 struct va_format vaf;
12269 va_list args;
12270
12271 va_start(args, format);
12272 vaf.fmt = format;
12273 vaf.va = &args;
12274
12275 if (adjust)
12276 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12277 else
12278 drm_err("mismatch in %s %pV", name, &vaf);
12279
12280 va_end(args);
12281 }
12282
12283 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12284 {
12285 if (i915_modparams.fastboot != -1)
12286 return i915_modparams.fastboot;
12287
12288 /* Enable fastboot by default on Skylake and newer */
12289 if (INTEL_GEN(dev_priv) >= 9)
12290 return true;
12291
12292 /* Enable fastboot by default on VLV and CHV */
12293 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12294 return true;
12295
12296 /* Disabled by default on all others */
12297 return false;
12298 }
12299
12300 static bool
12301 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
12302 struct intel_crtc_state *current_config,
12303 struct intel_crtc_state *pipe_config,
12304 bool adjust)
12305 {
12306 bool ret = true;
12307 bool fixup_inherited = adjust &&
12308 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12309 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12310
12311 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12312 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12313 ret = false;
12314 }
12315
12316 #define PIPE_CONF_CHECK_X(name) do { \
12317 if (current_config->name != pipe_config->name) { \
12318 pipe_config_err(adjust, __stringify(name), \
12319 "(expected 0x%08x, found 0x%08x)\n", \
12320 current_config->name, \
12321 pipe_config->name); \
12322 ret = false; \
12323 } \
12324 } while (0)
12325
12326 #define PIPE_CONF_CHECK_I(name) do { \
12327 if (current_config->name != pipe_config->name) { \
12328 pipe_config_err(adjust, __stringify(name), \
12329 "(expected %i, found %i)\n", \
12330 current_config->name, \
12331 pipe_config->name); \
12332 ret = false; \
12333 } \
12334 } while (0)
12335
12336 #define PIPE_CONF_CHECK_BOOL(name) do { \
12337 if (current_config->name != pipe_config->name) { \
12338 pipe_config_err(adjust, __stringify(name), \
12339 "(expected %s, found %s)\n", \
12340 yesno(current_config->name), \
12341 yesno(pipe_config->name)); \
12342 ret = false; \
12343 } \
12344 } while (0)
12345
12346 /*
12347 * Checks state where we only read out the enabling, but not the entire
12348 * state itself (like full infoframes or ELD for audio). These states
12349 * require a full modeset on bootup to fix up.
12350 */
12351 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12352 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12353 PIPE_CONF_CHECK_BOOL(name); \
12354 } else { \
12355 pipe_config_err(adjust, __stringify(name), \
12356 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12357 yesno(current_config->name), \
12358 yesno(pipe_config->name)); \
12359 ret = false; \
12360 } \
12361 } while (0)
12362
12363 #define PIPE_CONF_CHECK_P(name) do { \
12364 if (current_config->name != pipe_config->name) { \
12365 pipe_config_err(adjust, __stringify(name), \
12366 "(expected %p, found %p)\n", \
12367 current_config->name, \
12368 pipe_config->name); \
12369 ret = false; \
12370 } \
12371 } while (0)
12372
12373 #define PIPE_CONF_CHECK_M_N(name) do { \
12374 if (!intel_compare_link_m_n(&current_config->name, \
12375 &pipe_config->name,\
12376 adjust)) { \
12377 pipe_config_err(adjust, __stringify(name), \
12378 "(expected tu %i gmch %i/%i link %i/%i, " \
12379 "found tu %i, gmch %i/%i link %i/%i)\n", \
12380 current_config->name.tu, \
12381 current_config->name.gmch_m, \
12382 current_config->name.gmch_n, \
12383 current_config->name.link_m, \
12384 current_config->name.link_n, \
12385 pipe_config->name.tu, \
12386 pipe_config->name.gmch_m, \
12387 pipe_config->name.gmch_n, \
12388 pipe_config->name.link_m, \
12389 pipe_config->name.link_n); \
12390 ret = false; \
12391 } \
12392 } while (0)
12393
12394 /* This is required for BDW+ where there is only one set of registers for
12395 * switching between high and low RR.
12396 * This macro can be used whenever a comparison has to be made between one
12397 * hw state and multiple sw state variables.
12398 */
12399 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12400 if (!intel_compare_link_m_n(&current_config->name, \
12401 &pipe_config->name, adjust) && \
12402 !intel_compare_link_m_n(&current_config->alt_name, \
12403 &pipe_config->name, adjust)) { \
12404 pipe_config_err(adjust, __stringify(name), \
12405 "(expected tu %i gmch %i/%i link %i/%i, " \
12406 "or tu %i gmch %i/%i link %i/%i, " \
12407 "found tu %i, gmch %i/%i link %i/%i)\n", \
12408 current_config->name.tu, \
12409 current_config->name.gmch_m, \
12410 current_config->name.gmch_n, \
12411 current_config->name.link_m, \
12412 current_config->name.link_n, \
12413 current_config->alt_name.tu, \
12414 current_config->alt_name.gmch_m, \
12415 current_config->alt_name.gmch_n, \
12416 current_config->alt_name.link_m, \
12417 current_config->alt_name.link_n, \
12418 pipe_config->name.tu, \
12419 pipe_config->name.gmch_m, \
12420 pipe_config->name.gmch_n, \
12421 pipe_config->name.link_m, \
12422 pipe_config->name.link_n); \
12423 ret = false; \
12424 } \
12425 } while (0)
12426
12427 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12428 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12429 pipe_config_err(adjust, __stringify(name), \
12430 "(%x) (expected %i, found %i)\n", \
12431 (mask), \
12432 current_config->name & (mask), \
12433 pipe_config->name & (mask)); \
12434 ret = false; \
12435 } \
12436 } while (0)
12437
12438 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12439 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12440 pipe_config_err(adjust, __stringify(name), \
12441 "(expected %i, found %i)\n", \
12442 current_config->name, \
12443 pipe_config->name); \
12444 ret = false; \
12445 } \
12446 } while (0)
12447
12448 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12449 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12450 &pipe_config->infoframes.name)) { \
12451 pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
12452 &current_config->infoframes.name, \
12453 &pipe_config->infoframes.name); \
12454 ret = false; \
12455 } \
12456 } while (0)
12457
12458 #define PIPE_CONF_QUIRK(quirk) \
12459 ((current_config->quirks | pipe_config->quirks) & (quirk))
12460
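/*
 * Editor's note: each PIPE_CONF_CHECK_*() below compares one field of
 * the sw-computed config against the state read back from hardware and
 * reports any mismatch. As an illustration, PIPE_CONF_CHECK_I(name)
 * (defined above this hunk) expands to roughly:
 *
 *   if (current_config->name != pipe_config->name) {
 *           pipe_config_err(adjust, __stringify(name),
 *                           "(expected %i, found %i)\n",
 *                           current_config->name,
 *                           pipe_config->name);
 *           ret = false;
 *   }
 */
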
12461 PIPE_CONF_CHECK_I(cpu_transcoder);
12462
12463 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12464 PIPE_CONF_CHECK_I(fdi_lanes);
12465 PIPE_CONF_CHECK_M_N(fdi_m_n);
12466
12467 PIPE_CONF_CHECK_I(lane_count);
12468 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12469
12470 if (INTEL_GEN(dev_priv) < 8) {
12471 PIPE_CONF_CHECK_M_N(dp_m_n);
12472
12473 if (current_config->has_drrs)
12474 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12475 } else
12476 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12477
12478 PIPE_CONF_CHECK_X(output_types);
12479
12480 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12481 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12482 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12483 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12484 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12485 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12486
12487 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12488 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12489 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12490 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12491 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12492 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12493
12494 PIPE_CONF_CHECK_I(pixel_multiplier);
12495 PIPE_CONF_CHECK_I(output_format);
12496 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12497 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12498 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12499 PIPE_CONF_CHECK_BOOL(limited_color_range);
12500
12501 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12502 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12503 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12504
12505 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12506
12507 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12508 DRM_MODE_FLAG_INTERLACE);
12509
12510 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12511 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12512 DRM_MODE_FLAG_PHSYNC);
12513 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12514 DRM_MODE_FLAG_NHSYNC);
12515 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12516 DRM_MODE_FLAG_PVSYNC);
12517 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12518 DRM_MODE_FLAG_NVSYNC);
12519 }
12520
12521 PIPE_CONF_CHECK_X(gmch_pfit.control);
12522 /* pfit ratios are autocomputed by the hw on gen4+ */
12523 if (INTEL_GEN(dev_priv) < 4)
12524 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12525 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12526
12527 /*
12528 * Changing the EDP transcoder input mux
12529 * (A_ONOFF vs. A_ON) requires a full modeset.
12530 */
12531 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
12532
12533 if (!adjust) {
12534 PIPE_CONF_CHECK_I(pipe_src_w);
12535 PIPE_CONF_CHECK_I(pipe_src_h);
12536
12537 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12538 if (current_config->pch_pfit.enabled) {
12539 PIPE_CONF_CHECK_X(pch_pfit.pos);
12540 PIPE_CONF_CHECK_X(pch_pfit.size);
12541 }
12542
12543 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12544 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12545
12546 PIPE_CONF_CHECK_X(gamma_mode);
12547 if (IS_CHERRYVIEW(dev_priv))
12548 PIPE_CONF_CHECK_X(cgm_mode);
12549 else
12550 PIPE_CONF_CHECK_X(csc_mode);
12551 PIPE_CONF_CHECK_BOOL(gamma_enable);
12552 PIPE_CONF_CHECK_BOOL(csc_enable);
12553 }
12554
12555 PIPE_CONF_CHECK_BOOL(double_wide);
12556
12557 PIPE_CONF_CHECK_P(shared_dpll);
12558 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12559 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12560 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12561 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12562 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12563 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12564 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12565 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12566 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12567 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12568 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12569 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12570 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12571 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12572 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12573 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12574 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12575 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12576 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12577 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12578 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12579 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12580 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12581 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12582 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12583 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12584 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12585 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12586 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12587 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12588 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12589
12590 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12591 PIPE_CONF_CHECK_X(dsi_pll.div);
12592
12593 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12594 PIPE_CONF_CHECK_I(pipe_bpp);
12595
12596 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12597 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12598
12599 PIPE_CONF_CHECK_I(min_voltage_level);
12600
12601 PIPE_CONF_CHECK_X(infoframes.enable);
12602 PIPE_CONF_CHECK_X(infoframes.gcp);
12603 PIPE_CONF_CHECK_INFOFRAME(avi);
12604 PIPE_CONF_CHECK_INFOFRAME(spd);
12605 PIPE_CONF_CHECK_INFOFRAME(hdmi);
12606 PIPE_CONF_CHECK_INFOFRAME(drm);
12607
12608 #undef PIPE_CONF_CHECK_X
12609 #undef PIPE_CONF_CHECK_I
12610 #undef PIPE_CONF_CHECK_BOOL
12611 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12612 #undef PIPE_CONF_CHECK_P
12613 #undef PIPE_CONF_CHECK_FLAGS
12614 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12615 #undef PIPE_CONF_QUIRK
12616
12617 return ret;
12618 }
12619
12620 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12621 const struct intel_crtc_state *pipe_config)
12622 {
12623 if (pipe_config->has_pch_encoder) {
12624 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12625 &pipe_config->fdi_m_n);
12626 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12627
12628 /*
12629 * FDI already provided one idea for the dotclock.
12630 * Yell if the encoder disagrees.
12631 */
12632 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12633 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12634 fdi_dotclock, dotclock);
12635 }
12636 }
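
/*
 * Editor's note: intel_dotclock_calculate() (defined earlier in this
 * file) recovers the pixel clock from the FDI link M/N ratio, in
 * essence dotclock = link_m * link_freq / link_n. Worked example with
 * assumed numbers: for a 270000 kHz FDI link carrying a 148500 kHz
 * mode, link_m/link_n is programmed to 0.55, so the recomputed
 * dotclock lands within the fuzzy tolerance of the encoder-reported
 * crtc_clock and the WARN above stays silent.
 */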
12637
12638 static void verify_wm_state(struct drm_crtc *crtc,
12639 struct drm_crtc_state *new_state)
12640 {
12641 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12642 struct skl_hw_state {
12643 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12644 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12645 struct skl_ddb_allocation ddb;
12646 struct skl_pipe_wm wm;
12647 } *hw;
12648 struct skl_ddb_allocation *sw_ddb;
12649 struct skl_pipe_wm *sw_wm;
12650 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12651 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12652 const enum pipe pipe = intel_crtc->pipe;
12653 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12654
12655 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12656 return;
12657
12658 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12659 if (!hw)
12660 return;
12661
12662 skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
12663 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12664
12665 skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
12666
12667 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12668 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12669
12670 if (INTEL_GEN(dev_priv) >= 11 &&
12671 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12672 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12673 sw_ddb->enabled_slices,
12674 hw->ddb.enabled_slices);
12675
12676 /* planes */
12677 for_each_universal_plane(dev_priv, pipe, plane) {
12678 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12679
12680 hw_plane_wm = &hw->wm.planes[plane];
12681 sw_plane_wm = &sw_wm->planes[plane];
12682
12683 /* Watermarks */
12684 for (level = 0; level <= max_level; level++) {
12685 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12686 &sw_plane_wm->wm[level]))
12687 continue;
12688
12689 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12690 pipe_name(pipe), plane + 1, level,
12691 sw_plane_wm->wm[level].plane_en,
12692 sw_plane_wm->wm[level].plane_res_b,
12693 sw_plane_wm->wm[level].plane_res_l,
12694 hw_plane_wm->wm[level].plane_en,
12695 hw_plane_wm->wm[level].plane_res_b,
12696 hw_plane_wm->wm[level].plane_res_l);
12697 }
12698
12699 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12700 &sw_plane_wm->trans_wm)) {
12701 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12702 pipe_name(pipe), plane + 1,
12703 sw_plane_wm->trans_wm.plane_en,
12704 sw_plane_wm->trans_wm.plane_res_b,
12705 sw_plane_wm->trans_wm.plane_res_l,
12706 hw_plane_wm->trans_wm.plane_en,
12707 hw_plane_wm->trans_wm.plane_res_b,
12708 hw_plane_wm->trans_wm.plane_res_l);
12709 }
12710
12711 /* DDB */
12712 hw_ddb_entry = &hw->ddb_y[plane];
12713 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12714
12715 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12716 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12717 pipe_name(pipe), plane + 1,
12718 sw_ddb_entry->start, sw_ddb_entry->end,
12719 hw_ddb_entry->start, hw_ddb_entry->end);
12720 }
12721 }
12722
12723 /*
12724 * cursor
12725 * If the cursor plane isn't active, we may not have updated its ddb
12726 * allocation. In that case, since the ddb allocation will be updated
12727 * once the plane becomes visible, we can skip this check.
12728 */
12729 if (1) {
12730 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12731
12732 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12733 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12734
12735 /* Watermarks */
12736 for (level = 0; level <= max_level; level++) {
12737 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12738 &sw_plane_wm->wm[level]))
12739 continue;
12740
12741 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12742 pipe_name(pipe), level,
12743 sw_plane_wm->wm[level].plane_en,
12744 sw_plane_wm->wm[level].plane_res_b,
12745 sw_plane_wm->wm[level].plane_res_l,
12746 hw_plane_wm->wm[level].plane_en,
12747 hw_plane_wm->wm[level].plane_res_b,
12748 hw_plane_wm->wm[level].plane_res_l);
12749 }
12750
12751 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12752 &sw_plane_wm->trans_wm)) {
12753 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12754 pipe_name(pipe),
12755 sw_plane_wm->trans_wm.plane_en,
12756 sw_plane_wm->trans_wm.plane_res_b,
12757 sw_plane_wm->trans_wm.plane_res_l,
12758 hw_plane_wm->trans_wm.plane_en,
12759 hw_plane_wm->trans_wm.plane_res_b,
12760 hw_plane_wm->trans_wm.plane_res_l);
12761 }
12762
12763 /* DDB */
12764 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
12765 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12766
12767 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12768 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12769 pipe_name(pipe),
12770 sw_ddb_entry->start, sw_ddb_entry->end,
12771 hw_ddb_entry->start, hw_ddb_entry->end);
12772 }
12773 }
12774
12775 kfree(hw);
12776 }
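
/*
 * Editor's note: verify_wm_state() snapshots the hw state into a
 * kzalloc'ed scratch struct (presumably too large for the stack) and
 * then diffs it field by field against the sw state, with DRM_ERROR on
 * mismatch; the same pattern covers plane watermarks, transition
 * watermarks and DDB entries. skl_wm_level_equals() presumably reduces
 * to a per-field compare along the lines of (sketch, not the driver's
 * code):
 *
 *   return a->plane_en == b->plane_en &&
 *          a->plane_res_b == b->plane_res_b &&
 *          a->plane_res_l == b->plane_res_l;
 */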
12777
12778 static void
12779 verify_connector_state(struct drm_device *dev,
12780 struct drm_atomic_state *state,
12781 struct drm_crtc *crtc)
12782 {
12783 struct drm_connector *connector;
12784 struct drm_connector_state *new_conn_state;
12785 int i;
12786
12787 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12788 struct drm_encoder *encoder = connector->encoder;
12789 struct drm_crtc_state *crtc_state = NULL;
12790
12791 if (new_conn_state->crtc != crtc)
12792 continue;
12793
12794 if (crtc)
12795 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12796
12797 intel_connector_verify_state(crtc_state, new_conn_state);
12798
12799 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12800 "connector's atomic encoder doesn't match legacy encoder\n");
12801 }
12802 }
12803
12804 static void
12805 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12806 {
12807 struct intel_encoder *encoder;
12808 struct drm_connector *connector;
12809 struct drm_connector_state *old_conn_state, *new_conn_state;
12810 int i;
12811
12812 for_each_intel_encoder(dev, encoder) {
12813 bool enabled = false, found = false;
12814 enum pipe pipe;
12815
12816 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12817 encoder->base.base.id,
12818 encoder->base.name);
12819
12820 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12821 new_conn_state, i) {
12822 if (old_conn_state->best_encoder == &encoder->base)
12823 found = true;
12824
12825 if (new_conn_state->best_encoder != &encoder->base)
12826 continue;
12827 found = enabled = true;
12828
12829 I915_STATE_WARN(new_conn_state->crtc !=
12830 encoder->base.crtc,
12831 "connector's crtc doesn't match encoder crtc\n");
12832 }
12833
12834 if (!found)
12835 continue;
12836
12837 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12838 "encoder's enabled state mismatch "
12839 "(expected %i, found %i)\n",
12840 !!encoder->base.crtc, enabled);
12841
12842 if (!encoder->base.crtc) {
12843 bool active;
12844
12845 active = encoder->get_hw_state(encoder, &pipe);
12846 I915_STATE_WARN(active,
12847 "encoder detached but still enabled on pipe %c.\n",
12848 pipe_name(pipe));
12849 }
12850 }
12851 }
12852
12853 static void
12854 verify_crtc_state(struct drm_crtc *crtc,
12855 struct drm_crtc_state *old_crtc_state,
12856 struct drm_crtc_state *new_crtc_state)
12857 {
12858 struct drm_device *dev = crtc->dev;
12859 struct drm_i915_private *dev_priv = to_i915(dev);
12860 struct intel_encoder *encoder;
12861 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12862 struct intel_crtc_state *pipe_config, *sw_config;
12863 struct drm_atomic_state *old_state;
12864 bool active;
12865
12866 old_state = old_crtc_state->state;
12867 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12868 pipe_config = to_intel_crtc_state(old_crtc_state);
12869 memset(pipe_config, 0, sizeof(*pipe_config));
12870 pipe_config->base.crtc = crtc;
12871 pipe_config->base.state = old_state;
12872
12873 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12874
12875 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12876
12877 /* we keep both pipes enabled on 830 */
12878 if (IS_I830(dev_priv))
12879 active = new_crtc_state->active;
12880
12881 I915_STATE_WARN(new_crtc_state->active != active,
12882 "crtc active state doesn't match with hw state "
12883 "(expected %i, found %i)\n", new_crtc_state->active, active);
12884
12885 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12886 "transitional active state does not match atomic hw state "
12887 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12888
12889 for_each_encoder_on_crtc(dev, crtc, encoder) {
12890 enum pipe pipe;
12891
12892 active = encoder->get_hw_state(encoder, &pipe);
12893 I915_STATE_WARN(active != new_crtc_state->active,
12894 "[ENCODER:%i] active %i with crtc active %i\n",
12895 encoder->base.base.id, active, new_crtc_state->active);
12896
12897 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12898 "Encoder connected to wrong pipe %c\n",
12899 pipe_name(pipe));
12900
12901 if (active)
12902 encoder->get_config(encoder, pipe_config);
12903 }
12904
12905 intel_crtc_compute_pixel_rate(pipe_config);
12906
12907 if (!new_crtc_state->active)
12908 return;
12909
12910 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12911
12912 sw_config = to_intel_crtc_state(new_crtc_state);
12913 if (!intel_pipe_config_compare(dev_priv, sw_config,
12914 pipe_config, false)) {
12915 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12916 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
12917 intel_dump_pipe_config(sw_config, NULL, "[sw state]");
12918 }
12919 }
12920
12921 static void
12922 intel_verify_planes(struct intel_atomic_state *state)
12923 {
12924 struct intel_plane *plane;
12925 const struct intel_plane_state *plane_state;
12926 int i;
12927
12928 for_each_new_intel_plane_in_state(state, plane,
12929 plane_state, i)
12930 assert_plane(plane, plane_state->slave ||
12931 plane_state->base.visible);
12932 }
12933
12934 static void
12935 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12936 struct intel_shared_dpll *pll,
12937 struct drm_crtc *crtc,
12938 struct drm_crtc_state *new_state)
12939 {
12940 struct intel_dpll_hw_state dpll_hw_state;
12941 unsigned int crtc_mask;
12942 bool active;
12943
12944 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12945
12946 DRM_DEBUG_KMS("%s\n", pll->info->name);
12947
12948 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12949
12950 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12951 I915_STATE_WARN(!pll->on && pll->active_mask,
12952 "pll in active use but not on in sw tracking\n");
12953 I915_STATE_WARN(pll->on && !pll->active_mask,
12954 "pll is on but not used by any active crtc\n");
12955 I915_STATE_WARN(pll->on != active,
12956 "pll on state mismatch (expected %i, found %i)\n",
12957 pll->on, active);
12958 }
12959
12960 if (!crtc) {
12961 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12962 "more active pll users than references: %x vs %x\n",
12963 pll->active_mask, pll->state.crtc_mask);
12964
12965 return;
12966 }
12967
12968 crtc_mask = drm_crtc_mask(crtc);
12969
12970 if (new_state->active)
12971 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12972 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12973 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12974 else
12975 I915_STATE_WARN(pll->active_mask & crtc_mask,
12976 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12977 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12978
12979 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12980 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12981 crtc_mask, pll->state.crtc_mask);
12982
12983 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12984 &dpll_hw_state,
12985 sizeof(dpll_hw_state)),
12986 "pll hw state mismatch\n");
12987 }
12988
12989 static void
12990 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12991 struct drm_crtc_state *old_crtc_state,
12992 struct drm_crtc_state *new_crtc_state)
12993 {
12994 struct drm_i915_private *dev_priv = to_i915(dev);
12995 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12996 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12997
12998 if (new_state->shared_dpll)
12999 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13000
13001 if (old_state->shared_dpll &&
13002 old_state->shared_dpll != new_state->shared_dpll) {
13003 unsigned int crtc_mask = drm_crtc_mask(crtc);
13004 struct intel_shared_dpll *pll = old_state->shared_dpll;
13005
13006 I915_STATE_WARN(pll->active_mask & crtc_mask,
13007 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13008 pipe_name(drm_crtc_index(crtc)));
13009 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13010 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13011 pipe_name(drm_crtc_index(crtc)));
13012 }
13013 }
13014
13015 static void
13016 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13017 struct drm_atomic_state *state,
13018 struct drm_crtc_state *old_state,
13019 struct drm_crtc_state *new_state)
13020 {
13021 if (!needs_modeset(new_state) &&
13022 !to_intel_crtc_state(new_state)->update_pipe)
13023 return;
13024
13025 verify_wm_state(crtc, new_state);
13026 verify_connector_state(crtc->dev, state, crtc);
13027 verify_crtc_state(crtc, old_state, new_state);
13028 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13029 }
13030
13031 static void
13032 verify_disabled_dpll_state(struct drm_device *dev)
13033 {
13034 struct drm_i915_private *dev_priv = to_i915(dev);
13035 int i;
13036
13037 for (i = 0; i < dev_priv->num_shared_dpll; i++)
13038 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13039 }
13040
13041 static void
13042 intel_modeset_verify_disabled(struct drm_device *dev,
13043 struct drm_atomic_state *state)
13044 {
13045 verify_encoder_state(dev, state);
13046 verify_connector_state(dev, state, NULL);
13047 verify_disabled_dpll_state(dev);
13048 }
13049
13050 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13051 {
13052 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13053 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13054
13055 /*
13056 * The scanline counter increments at the leading edge of hsync.
13057 *
13058 * On most platforms it starts counting from vtotal-1 on the
13059 * first active line. That means the scanline counter value is
13060 * always one less than what we would expect. I.e. just after
13061 * start of vblank, which also occurs at start of hsync (on the
13062 * last active line), the scanline counter will read vblank_start-1.
13063 *
13064 * On gen2 the scanline counter starts counting from 1 instead
13065 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13066 * to keep the value positive), instead of adding one.
13067 *
13068 * On HSW+ the behaviour of the scanline counter depends on the output
13069 * type. For DP ports it behaves like most other platforms, but on HDMI
13070 * there's an extra 1 line difference. So we need to add two instead of
13071 * one to the value.
13072 *
13073 * On VLV/CHV DSI the scanline counter would appear to increment
13074 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13075 * that means we can't tell whether we're in vblank or not while
13076 * we're on that particular line. We must still set scanline_offset
13077 * to 1 so that the vblank timestamps come out correct when we query
13078 * the scanline counter from within the vblank interrupt handler.
13079 * However if queried just before the start of vblank we'll get an
13080 * answer that's slightly in the future.
13081 */
13082 if (IS_GEN(dev_priv, 2)) {
13083 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13084 int vtotal;
13085
13086 vtotal = adjusted_mode->crtc_vtotal;
13087 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13088 vtotal /= 2;
13089
13090 crtc->scanline_offset = vtotal - 1;
13091 } else if (HAS_DDI(dev_priv) &&
13092 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13093 crtc->scanline_offset = 2;
13094 } else
13095 crtc->scanline_offset = 1;
13096 }
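
/*
 * Editor's note: the offset chosen above is consumed by the driver's
 * scanline readout, which re-normalizes the hw counter along the lines
 * of:
 *
 *   scanline = (hw_scanline + crtc->scanline_offset) % vtotal;
 *
 * so e.g. on gen2, where the offset is vtotal - 1 (i.e. -1 modulo
 * vtotal), a raw reading of 1 on the first active line maps back to
 * scanline 0.
 */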
13097
13098 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13099 {
13100 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13101 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13102 struct intel_crtc *crtc;
13103 int i;
13104
13105 if (!dev_priv->display.crtc_compute_clock)
13106 return;
13107
13108 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13109 new_crtc_state, i) {
13110 struct intel_shared_dpll *old_dpll =
13111 old_crtc_state->shared_dpll;
13112
13113 if (!needs_modeset(&new_crtc_state->base))
13114 continue;
13115
13116 new_crtc_state->shared_dpll = NULL;
13117
13118 if (!old_dpll)
13119 continue;
13120
13121 intel_release_shared_dpll(old_dpll, crtc, &state->base);
13122 }
13123 }
13124
13125 /*
13126 * This implements the workaround described in the "notes" section of the mode
13127 * set sequence documentation. When going from no pipes or single pipe to
13128 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13129 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13130 */
13131 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13132 {
13133 struct intel_crtc_state *crtc_state;
13134 struct intel_crtc *crtc;
13135 struct intel_crtc_state *first_crtc_state = NULL;
13136 struct intel_crtc_state *other_crtc_state = NULL;
13137 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13138 int i;
13139
13140 /* look at all crtcs that are going to be enabled during the modeset */
13141 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13142 if (!crtc_state->base.active ||
13143 !needs_modeset(&crtc_state->base))
13144 continue;
13145
13146 if (first_crtc_state) {
13147 other_crtc_state = crtc_state;
13148 break;
13149 } else {
13150 first_crtc_state = crtc_state;
13151 first_pipe = crtc->pipe;
13152 }
13153 }
13154
13155 /* No workaround needed? */
13156 if (!first_crtc_state)
13157 return 0;
13158
13159 /* w/a possibly needed, check how many crtcs are already enabled. */
13160 for_each_intel_crtc(state->base.dev, crtc) {
13161 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13162 if (IS_ERR(crtc_state))
13163 return PTR_ERR(crtc_state);
13164
13165 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13166
13167 if (!crtc_state->base.active ||
13168 needs_modeset(&crtc_state->base))
13169 continue;
13170
13171 /* 2 or more enabled crtcs means no need for w/a */
13172 if (enabled_pipe != INVALID_PIPE)
13173 return 0;
13174
13175 enabled_pipe = crtc->pipe;
13176 }
13177
13178 if (enabled_pipe != INVALID_PIPE)
13179 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13180 else if (other_crtc_state)
13181 other_crtc_state->hsw_workaround_pipe = first_pipe;
13182
13183 return 0;
13184 }
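
/*
 * Editor's note: a concrete scenario for the workaround above. Pipe A
 * is already running and this commit enables pipe B: the second loop
 * finds A enabled, so first_crtc_state (pipe B) gets
 * hsw_workaround_pipe = A and the commit path is expected to wait the
 * required vblanks on A before enabling B's planes. Going from zero to
 * two pipes (B and C enabled in one commit) takes the other_crtc_state
 * branch instead: C waits on B (first_pipe).
 */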
13185
13186 static int intel_lock_all_pipes(struct drm_atomic_state *state)
13187 {
13188 struct drm_crtc *crtc;
13189
13190 /* Add all pipes to the state */
13191 for_each_crtc(state->dev, crtc) {
13192 struct drm_crtc_state *crtc_state;
13193
13194 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13195 if (IS_ERR(crtc_state))
13196 return PTR_ERR(crtc_state);
13197 }
13198
13199 return 0;
13200 }
13201
13202 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13203 {
13204 struct drm_crtc *crtc;
13205
13206 /*
13207 * Add all pipes to the state, and force
13208 * a modeset on all the active ones.
13209 */
13210 for_each_crtc(state->dev, crtc) {
13211 struct drm_crtc_state *crtc_state;
13212 int ret;
13213
13214 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13215 if (IS_ERR(crtc_state))
13216 return PTR_ERR(crtc_state);
13217
13218 if (!crtc_state->active || needs_modeset(crtc_state))
13219 continue;
13220
13221 crtc_state->mode_changed = true;
13222
13223 ret = drm_atomic_add_affected_connectors(state, crtc);
13224 if (ret)
13225 return ret;
13226
13227 ret = drm_atomic_add_affected_planes(state, crtc);
13228 if (ret)
13229 return ret;
13230 }
13231
13232 return 0;
13233 }
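
/*
 * Editor's note on the two helpers above: intel_lock_all_pipes() only
 * pulls every crtc into the state (thereby acquiring its lock) without
 * setting mode_changed, so the commit merely serializes against all
 * pipes; intel_modeset_all_pipes() additionally forces mode_changed on
 * every active pipe and pulls in its connectors and planes, i.e. a
 * full modeset everywhere. intel_modeset_checks() below picks between
 * them based on how invasive the pending cdclk change is.
 */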
13234
13235 static int intel_modeset_checks(struct intel_atomic_state *state)
13236 {
13237 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13238 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13239 struct intel_crtc *crtc;
13240 int ret = 0, i;
13241
13242 if (!check_digital_port_conflicts(state)) {
13243 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13244 return -EINVAL;
13245 }
13246
13247 /* keep the current setting */
13248 if (!state->cdclk.force_min_cdclk_changed)
13249 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13250
13251 state->modeset = true;
13252 state->active_crtcs = dev_priv->active_crtcs;
13253 state->cdclk.logical = dev_priv->cdclk.logical;
13254 state->cdclk.actual = dev_priv->cdclk.actual;
13255 state->cdclk.pipe = INVALID_PIPE;
13256
13257 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13258 new_crtc_state, i) {
13259 if (new_crtc_state->base.active)
13260 state->active_crtcs |= 1 << i;
13261 else
13262 state->active_crtcs &= ~(1 << i);
13263
13264 if (old_crtc_state->base.active != new_crtc_state->base.active)
13265 state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
13266 }
13267
13268 /*
13269 * See if the config requires any additional preparation, e.g.
13270 * to adjust global state with pipes off. We need to do this
13271 * here so we can get the updated config for the new mode
13272 * set on this crtc. For other crtcs we need to use the
13273 * adjusted_mode bits in the crtc directly.
13274 */
13275 if (dev_priv->display.modeset_calc_cdclk) {
13276 enum pipe pipe;
13277
13278 ret = dev_priv->display.modeset_calc_cdclk(state);
13279 if (ret < 0)
13280 return ret;
13281
13282 /*
13283 * Writes to dev_priv->cdclk.logical must be protected by
13284 * holding all the crtc locks, even if we don't end up
13285 * touching the hardware.
13286 */
13287 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13288 &state->cdclk.logical)) {
13289 ret = intel_lock_all_pipes(&state->base);
13290 if (ret < 0)
13291 return ret;
13292 }
13293
13294 if (is_power_of_2(state->active_crtcs)) {
13295 struct drm_crtc *crtc;
13296 struct drm_crtc_state *crtc_state;
13297
13298 pipe = ilog2(state->active_crtcs);
13299 crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
13300 crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
13301 if (crtc_state && needs_modeset(crtc_state))
13302 pipe = INVALID_PIPE;
13303 } else {
13304 pipe = INVALID_PIPE;
13305 }
13306
13307 /* All pipes must be switched off while we change the cdclk. */
13308 if (pipe != INVALID_PIPE &&
13309 intel_cdclk_needs_cd2x_update(dev_priv,
13310 &dev_priv->cdclk.actual,
13311 &state->cdclk.actual)) {
13312 ret = intel_lock_all_pipes(&state->base);
13313 if (ret < 0)
13314 return ret;
13315
13316 state->cdclk.pipe = pipe;
13317 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13318 &state->cdclk.actual)) {
13319 ret = intel_modeset_all_pipes(&state->base);
13320 if (ret < 0)
13321 return ret;
13322
13323 state->cdclk.pipe = INVALID_PIPE;
13324 }
13325
13326 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13327 state->cdclk.logical.cdclk,
13328 state->cdclk.actual.cdclk);
13329 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13330 state->cdclk.logical.voltage_level,
13331 state->cdclk.actual.voltage_level);
13332 }
13333
13334 intel_modeset_clear_plls(state);
13335
13336 if (IS_HASWELL(dev_priv))
13337 return haswell_mode_set_planes_workaround(state);
13338
13339 return 0;
13340 }
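
/*
 * Editor's note: the is_power_of_2(state->active_crtcs) test in
 * intel_modeset_checks() is a bitmask trick: a power of two means
 * exactly one bit set, i.e. exactly one active pipe, and ilog2() then
 * recovers its index. For example:
 *
 *   active_crtcs == 0x4  ->  is_power_of_2() true, ilog2() == 2 (pipe C)
 *   active_crtcs == 0x5  ->  two pipes active, no cd2x shortcut
 *
 * Only in the single-pipe case (and only if that pipe isn't itself
 * undergoing a full modeset) can the cd2x divider be retuned around
 * that one pipe instead of forcing a modeset on everything.
 */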
13341
13342 /*
13343 * Handle calculation of various watermark data at the end of the atomic check
13344 * phase. The code here should be run after the per-crtc and per-plane 'check'
13345 * handlers to ensure that all derived state has been updated.
13346 */
13347 static int calc_watermark_data(struct intel_atomic_state *state)
13348 {
13349 struct drm_device *dev = state->base.dev;
13350 struct drm_i915_private *dev_priv = to_i915(dev);
13351
13352 /* Is there platform-specific watermark information to calculate? */
13353 if (dev_priv->display.compute_global_watermarks)
13354 return dev_priv->display.compute_global_watermarks(state);
13355
13356 return 0;
13357 }
13358
13359 /**
13360 * intel_atomic_check - validate state object
13361 * @dev: drm device
13362 * @state: state to validate
13363 */
13364 static int intel_atomic_check(struct drm_device *dev,
13365 struct drm_atomic_state *_state)
13366 {
13367 struct drm_i915_private *dev_priv = to_i915(dev);
13368 struct intel_atomic_state *state = to_intel_atomic_state(_state);
13369 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13370 struct intel_crtc *crtc;
13371 int ret, i;
13372 bool any_ms = state->cdclk.force_min_cdclk_changed;
13373
13374 /* Catch I915_MODE_FLAG_INHERITED */
13375 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13376 new_crtc_state, i) {
13377 if (new_crtc_state->base.mode.private_flags !=
13378 old_crtc_state->base.mode.private_flags)
13379 new_crtc_state->base.mode_changed = true;
13380 }
13381
13382 ret = drm_atomic_helper_check_modeset(dev, &state->base);
13383 if (ret)
13384 goto fail;
13385
13386 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13387 new_crtc_state, i) {
13388 if (!needs_modeset(&new_crtc_state->base))
13389 continue;
13390
13391 if (!new_crtc_state->base.enable) {
13392 any_ms = true;
13393 continue;
13394 }
13395
13396 ret = intel_modeset_pipe_config(new_crtc_state);
13397 if (ret)
13398 goto fail;
13399
13400 if (intel_pipe_config_compare(dev_priv, old_crtc_state,
13401 new_crtc_state, true)) {
13402 new_crtc_state->base.mode_changed = false;
13403 new_crtc_state->update_pipe = true;
13404 }
13405
13406 if (needs_modeset(&new_crtc_state->base))
13407 any_ms = true;
13408 }
13409
13410 ret = drm_dp_mst_atomic_check(&state->base);
13411 if (ret)
13412 goto fail;
13413
13414 if (any_ms) {
13415 ret = intel_modeset_checks(state);
13416 if (ret)
13417 goto fail;
13418 } else {
13419 state->cdclk.logical = dev_priv->cdclk.logical;
13420 }
13421
13422 ret = icl_add_linked_planes(state);
13423 if (ret)
13424 goto fail;
13425
13426 ret = drm_atomic_helper_check_planes(dev, &state->base);
13427 if (ret)
13428 goto fail;
13429
13430 intel_fbc_choose_crtc(dev_priv, state);
13431 ret = calc_watermark_data(state);
13432 if (ret)
13433 goto fail;
13434
13435 ret = intel_bw_atomic_check(state);
13436 if (ret)
13437 goto fail;
13438
13439 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13440 new_crtc_state, i) {
13441 if (!needs_modeset(&new_crtc_state->base) &&
13442 !new_crtc_state->update_pipe)
13443 continue;
13444
13445 intel_dump_pipe_config(new_crtc_state, state,
13446 needs_modeset(&new_crtc_state->base) ?
13447 "[modeset]" : "[fastset]");
13448 }
13449
13450 return 0;
13451
13452 fail:
13453 if (ret == -EDEADLK)
13454 return ret;
13455
13456 /*
13457 * FIXME would probably be nice to know which crtc specifically
13458 * caused the failure, in cases where we can pinpoint it.
13459 */
13460 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13461 new_crtc_state, i)
13462 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
13463
13464 return ret;
13465 }
13466
13467 static int intel_atomic_prepare_commit(struct drm_device *dev,
13468 struct drm_atomic_state *state)
13469 {
13470 return drm_atomic_helper_prepare_planes(dev, state);
13471 }
13472
13473 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13474 {
13475 struct drm_device *dev = crtc->base.dev;
13476 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13477
13478 if (!vblank->max_vblank_count)
13479 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13480
13481 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13482 }
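
/*
 * Editor's note: vblank->max_vblank_count == 0 is how the core signals
 * that no usable hw frame counter exists for this crtc; in that case
 * drm_crtc_accurate_vblank_count() reconstructs the count in software
 * from vblank timestamps, hence the cast-and-return fallback above.
 */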
13483
13484 static void intel_update_crtc(struct drm_crtc *crtc,
13485 struct drm_atomic_state *state,
13486 struct drm_crtc_state *old_crtc_state,
13487 struct drm_crtc_state *new_crtc_state)
13488 {
13489 struct drm_device *dev = crtc->dev;
13490 struct drm_i915_private *dev_priv = to_i915(dev);
13491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13492 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13493 bool modeset = needs_modeset(new_crtc_state);
13494 struct intel_plane_state *new_plane_state =
13495 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13496 to_intel_plane(crtc->primary));
13497
13498 if (modeset) {
13499 update_scanline_offset(pipe_config);
13500 dev_priv->display.crtc_enable(pipe_config, state);
13501
13502 /* vblanks work again, re-enable pipe CRC. */
13503 intel_crtc_enable_pipe_crc(intel_crtc);
13504 } else {
13505 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13506 pipe_config);
13507
13508 if (pipe_config->update_pipe)
13509 intel_encoders_update_pipe(crtc, pipe_config, state);
13510 }
13511
13512 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13513 intel_fbc_disable(intel_crtc);
13514 else if (new_plane_state)
13515 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
13516
13517 intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13518
13519 if (INTEL_GEN(dev_priv) >= 9)
13520 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13521 else
13522 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13523
13524 intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13525 }
13526
13527 static void intel_update_crtcs(struct drm_atomic_state *state)
13528 {
13529 struct drm_crtc *crtc;
13530 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13531 int i;
13532
13533 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13534 if (!new_crtc_state->active)
13535 continue;
13536
13537 intel_update_crtc(crtc, state, old_crtc_state,
13538 new_crtc_state);
13539 }
13540 }
13541
13542 static void skl_update_crtcs(struct drm_atomic_state *state)
13543 {
13544 struct drm_i915_private *dev_priv = to_i915(state->dev);
13545 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13546 struct drm_crtc *crtc;
13547 struct intel_crtc *intel_crtc;
13548 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13549 struct intel_crtc_state *cstate;
13550 unsigned int updated = 0;
13551 bool progress;
13552 enum pipe pipe;
13553 int i;
13554 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13555 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13556 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13557
13558 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13559 /* ignore allocations for crtcs that have been turned off. */
13560 if (new_crtc_state->active)
13561 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13562
13563 /* If a 2nd DBuf slice is required, enable it here */
13564 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13565 icl_dbuf_slices_update(dev_priv, required_slices);
13566
13567 /*
13568 * Whenever the number of active pipes changes, we need to make sure we
13569 * update the pipes in the right order so that their ddb allocations
13570 * never overlap with each other between CRTC updates. Otherwise we'll
13571 * cause pipe underruns and other bad stuff.
13572 */
13573 do {
13574 progress = false;
13575
13576 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13577 bool vbl_wait = false;
13578 unsigned int cmask = drm_crtc_mask(crtc);
13579
13580 intel_crtc = to_intel_crtc(crtc);
13581 cstate = to_intel_crtc_state(new_crtc_state);
13582 pipe = intel_crtc->pipe;
13583
13584 if (updated & cmask || !cstate->base.active)
13585 continue;
13586
13587 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13588 entries,
13589 INTEL_INFO(dev_priv)->num_pipes, i))
13590 continue;
13591
13592 updated |= cmask;
13593 entries[i] = cstate->wm.skl.ddb;
13594
13595 /*
13596 * If this is an already active pipe, its DDB changed,
13597 * and this isn't the last pipe that needs updating,
13598 * then we need to wait for a vblank to pass for the
13599 * new ddb allocation to take effect.
13600 */
13601 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13602 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13603 !new_crtc_state->active_changed &&
13604 intel_state->wm_results.dirty_pipes != updated)
13605 vbl_wait = true;
13606
13607 intel_update_crtc(crtc, state, old_crtc_state,
13608 new_crtc_state);
13609
13610 if (vbl_wait)
13611 intel_wait_for_vblank(dev_priv, pipe);
13612
13613 progress = true;
13614 }
13615 } while (progress);
13616
13617 /* If the 2nd DBuf slice is no longer required, disable it */
13618 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13619 icl_dbuf_slices_update(dev_priv, required_slices);
13620 }
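
/*
 * Editor's note: a worked example for the ordering loop above,
 * assuming two active pipes trading DDB space. Old allocation:
 * A = [0, 511], B = [512, 1023]; new allocation: A = [0, 255],
 * B = [256, 1023]. On the first pass A's new range overlaps no stale
 * entry, so A is flushed (entries[A] becomes [0, 255]) and, because
 * A's DDB changed and dirty pipes remain, the loop waits one vblank
 * for the new allocation to take effect. B's new range then overlaps
 * nothing stale either and is flushed later in the same pass. B could
 * never have gone first: [256, 1023] still overlapped A's old
 * [0, 511].
 */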
13621
13622 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13623 {
13624 struct intel_atomic_state *state, *next;
13625 struct llist_node *freed;
13626
13627 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13628 llist_for_each_entry_safe(state, next, freed, freed)
13629 drm_atomic_state_put(&state->base);
13630 }
13631
13632 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13633 {
13634 struct drm_i915_private *dev_priv =
13635 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13636
13637 intel_atomic_helper_free_state(dev_priv);
13638 }
13639
13640 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13641 {
13642 struct wait_queue_entry wait_fence, wait_reset;
13643 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13644
13645 init_wait_entry(&wait_fence, 0);
13646 init_wait_entry(&wait_reset, 0);
13647 for (;;) {
13648 prepare_to_wait(&intel_state->commit_ready.wait,
13649 &wait_fence, TASK_UNINTERRUPTIBLE);
13650 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13651 &wait_reset, TASK_UNINTERRUPTIBLE);
13652
13653
12654 if (i915_sw_fence_done(&intel_state->commit_ready) ||
12655 test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13656 break;
13657
13658 schedule();
13659 }
13660 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13661 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13662 }
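
/*
 * Editor's note: the open-coded loop above is the classic "wait on
 * either of two queues" pattern: prepare_to_wait() on both the commit
 * fence's waitqueue and the GPU error queue, re-check both conditions,
 * schedule(), repeat. A pending GPU reset (I915_RESET_MODESET) can
 * therefore break the commit out of its fence wait instead of the
 * reset deadlocking against a commit that will never signal.
 */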
13663
13664 static void intel_atomic_cleanup_work(struct work_struct *work)
13665 {
13666 struct drm_atomic_state *state =
13667 container_of(work, struct drm_atomic_state, commit_work);
13668 struct drm_i915_private *i915 = to_i915(state->dev);
13669
13670 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13671 drm_atomic_helper_commit_cleanup_done(state);
13672 drm_atomic_state_put(state);
13673
13674 intel_atomic_helper_free_state(i915);
13675 }
13676
13677 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13678 {
13679 struct drm_device *dev = state->dev;
13680 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13681 struct drm_i915_private *dev_priv = to_i915(dev);
13682 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13683 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13684 struct drm_crtc *crtc;
13685 struct intel_crtc *intel_crtc;
13686 u64 put_domains[I915_MAX_PIPES] = {};
13687 intel_wakeref_t wakeref = 0;
13688 int i;
13689
13690 intel_atomic_commit_fence_wait(intel_state);
13691
13692 drm_atomic_helper_wait_for_dependencies(state);
13693
13694 if (intel_state->modeset)
13695 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13696
13697 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13698 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13699 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13700 intel_crtc = to_intel_crtc(crtc);
13701
13702 if (needs_modeset(new_crtc_state) ||
13703 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13704
13705 put_domains[intel_crtc->pipe] =
13706 modeset_get_crtc_power_domains(crtc,
13707 new_intel_crtc_state);
13708 }
13709
13710 if (!needs_modeset(new_crtc_state))
13711 continue;
13712
13713 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13714
13715 if (old_crtc_state->active) {
13716 intel_crtc_disable_planes(intel_state, intel_crtc);
13717
13718 /*
13719 * We need to disable pipe CRC before disabling the pipe,
13720 * or we race against vblank off.
13721 */
13722 intel_crtc_disable_pipe_crc(intel_crtc);
13723
13724 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13725 intel_crtc->active = false;
13726 intel_fbc_disable(intel_crtc);
13727 intel_disable_shared_dpll(old_intel_crtc_state);
13728
13729 /*
13730 * Underruns don't always raise
13731 * interrupts, so check manually.
13732 */
13733 intel_check_cpu_fifo_underruns(dev_priv);
13734 intel_check_pch_fifo_underruns(dev_priv);
13735
13736 /* FIXME unify this for all platforms */
13737 if (!new_crtc_state->active &&
13738 !HAS_GMCH(dev_priv) &&
13739 dev_priv->display.initial_watermarks)
13740 dev_priv->display.initial_watermarks(intel_state,
13741 new_intel_crtc_state);
13742 }
13743 }
13744
13745 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13746 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13747 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13748
13749 if (intel_state->modeset) {
13750 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13751
13752 intel_set_cdclk_pre_plane_update(dev_priv,
13753 &intel_state->cdclk.actual,
13754 &dev_priv->cdclk.actual,
13755 intel_state->cdclk.pipe);
13756
13757 /*
13758 * SKL workaround: bspec recommends we disable the SAGV when we
13759 * have more than one pipe enabled.
13760 */
13761 if (!intel_can_enable_sagv(state))
13762 intel_disable_sagv(dev_priv);
13763
13764 intel_modeset_verify_disabled(dev, state);
13765 }
13766
13767 /* Complete the events for pipes that have now been disabled */
13768 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13769 bool modeset = needs_modeset(new_crtc_state);
13770
13771 /* Complete events for now disabled pipes here. */
13772 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13773 spin_lock_irq(&dev->event_lock);
13774 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13775 spin_unlock_irq(&dev->event_lock);
13776
13777 new_crtc_state->event = NULL;
13778 }
13779 }
13780
13781 /* Now enable the clocks, planes, pipes, and connectors that we set up. */
13782 dev_priv->display.update_crtcs(state);
13783
13784 if (intel_state->modeset)
13785 intel_set_cdclk_post_plane_update(dev_priv,
13786 &intel_state->cdclk.actual,
13787 &dev_priv->cdclk.actual,
13788 intel_state->cdclk.pipe);
13789
13790 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13791 * already, but still need the state for the delayed optimization. To
13792 * fix this:
13793 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13794 * - schedule that vblank worker _before_ calling hw_done
13795 * - at the start of commit_tail, cancel it _synchronously_
13796 * - switch over to the vblank wait helper in the core after that since
13797 * we don't need our special handling any more.
13798 */
13799 drm_atomic_helper_wait_for_flip_done(dev, state);
13800
13801 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13802 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13803
13804 if (new_crtc_state->active &&
13805 !needs_modeset(new_crtc_state) &&
13806 (new_intel_crtc_state->base.color_mgmt_changed ||
13807 new_intel_crtc_state->update_pipe))
13808 intel_color_load_luts(new_intel_crtc_state);
13809 }
13810
13811 /*
13812 * Now that the vblank has passed, we can go ahead and program the
13813 * optimal watermarks on platforms that need two-step watermark
13814 * programming.
13815 *
13816 * TODO: Move this (and other cleanup) to an async worker eventually.
13817 */
13818 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13819 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13820
13821 if (dev_priv->display.optimize_watermarks)
13822 dev_priv->display.optimize_watermarks(intel_state,
13823 new_intel_crtc_state);
13824 }
13825
13826 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13827 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13828
13829 if (put_domains[i])
13830 modeset_put_power_domains(dev_priv, put_domains[i]);
13831
13832 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13833 }
13834
13835 if (intel_state->modeset)
13836 intel_verify_planes(intel_state);
13837
13838 if (intel_state->modeset && intel_can_enable_sagv(state))
13839 intel_enable_sagv(dev_priv);
13840
13841 drm_atomic_helper_commit_hw_done(state);
13842
13843 if (intel_state->modeset) {
13844 /* As one of the primary mmio accessors, KMS has a high
13845 * likelihood of triggering bugs in unclaimed access. After we
13846 * finish modesetting, see if an error has been flagged, and if
13847 * so enable debugging for the next modeset - and hope we catch
13848 * the culprit.
13849 */
13850 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13851 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13852 }
13853 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
13854
13855 /*
13856 * Defer the cleanup of the old state to a separate worker so as not
13857 * to impede the current task (userspace, for blocking modesets) that
13858 * is executed inline. For out-of-line asynchronous modesets/flips,
13859 * deferring to a new worker seems overkill, but we would place a
13860 * schedule point (cond_resched()) here anyway to keep latencies
13861 * down.
13862 */
13863 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13864 queue_work(system_highpri_wq, &state->commit_work);
13865 }
13866
13867 static void intel_atomic_commit_work(struct work_struct *work)
13868 {
13869 struct drm_atomic_state *state =
13870 container_of(work, struct drm_atomic_state, commit_work);
13871
13872 intel_atomic_commit_tail(state);
13873 }
13874
13875 static int __i915_sw_fence_call
13876 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13877 enum i915_sw_fence_notify notify)
13878 {
13879 struct intel_atomic_state *state =
13880 container_of(fence, struct intel_atomic_state, commit_ready);
13881
13882 switch (notify) {
13883 case FENCE_COMPLETE:
13884 /* we do blocking waits in the worker, nothing to do here */
13885 break;
13886 case FENCE_FREE:
13887 {
13888 struct intel_atomic_helper *helper =
13889 &to_i915(state->base.dev)->atomic_helper;
13890
13891 if (llist_add(&state->freed, &helper->free_list))
13892 schedule_work(&helper->free_work);
13893 break;
13894 }
13895 }
13896
13897 return NOTIFY_DONE;
13898 }
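
/*
 * Editor's note: FENCE_COMPLETE fires once everything the commit fence
 * awaited (plane dma-fences and the old fb's reservation, see
 * intel_prepare_plane_fb() below) has signalled; the blocking wait
 * itself happens in intel_atomic_commit_fence_wait(), so there is
 * nothing to do here. FENCE_FREE hands the state off to the free_work
 * worker via a lockless llist, presumably because the notifier may run
 * in a context where freeing the state directly would be unsafe.
 */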
13899
13900 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13901 {
13902 struct drm_plane_state *old_plane_state, *new_plane_state;
13903 struct drm_plane *plane;
13904 int i;
13905
13906 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13907 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13908 intel_fb_obj(new_plane_state->fb),
13909 to_intel_plane(plane)->frontbuffer_bit);
13910 }
13911
13912 /**
13913 * intel_atomic_commit - commit validated state object
13914 * @dev: DRM device
13915 * @state: the top-level driver state object
13916 * @nonblock: nonblocking commit
13917 *
13918 * This function commits a top-level state object that has been validated
13919 * with drm_atomic_helper_check().
13920 *
13921 * RETURNS
13922 * Zero for success or -errno.
13923 */
13924 static int intel_atomic_commit(struct drm_device *dev,
13925 struct drm_atomic_state *state,
13926 bool nonblock)
13927 {
13928 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13929 struct drm_i915_private *dev_priv = to_i915(dev);
13930 int ret = 0;
13931
13932 intel_state->wakeref = intel_runtime_pm_get(dev_priv);
13933
13934 drm_atomic_state_get(state);
13935 i915_sw_fence_init(&intel_state->commit_ready,
13936 intel_atomic_commit_ready);
13937
13938 /*
13939 * The intel_legacy_cursor_update() fast path takes care
13940 * of avoiding the vblank waits for simple cursor
13941 * movement and flips. For cursor on/off and size changes,
13942 * we want to perform the vblank waits so that watermark
13943 * updates happen during the correct frames. Gen9+ have
13944 * double buffered watermarks and so shouldn't need this.
13945 *
13946 * Unset state->legacy_cursor_update before the call to
13947 * drm_atomic_helper_setup_commit() because otherwise
13948 * drm_atomic_helper_wait_for_flip_done() is a noop and
13949 * we get FIFO underruns because we didn't wait
13950 * for vblank.
13951 *
13952 * FIXME doing watermarks and fb cleanup from a vblank worker
13953 * (assuming we had any) would solve these problems.
13954 */
13955 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13956 struct intel_crtc_state *new_crtc_state;
13957 struct intel_crtc *crtc;
13958 int i;
13959
13960 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13961 if (new_crtc_state->wm.need_postvbl_update ||
13962 new_crtc_state->update_wm_post)
13963 state->legacy_cursor_update = false;
13964 }
13965
13966 ret = intel_atomic_prepare_commit(dev, state);
13967 if (ret) {
13968 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13969 i915_sw_fence_commit(&intel_state->commit_ready);
13970 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
13971 return ret;
13972 }
13973
13974 ret = drm_atomic_helper_setup_commit(state, nonblock);
13975 if (!ret)
13976 ret = drm_atomic_helper_swap_state(state, true);
13977
13978 if (ret) {
13979 i915_sw_fence_commit(&intel_state->commit_ready);
13980
13981 drm_atomic_helper_cleanup_planes(dev, state);
13982 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
13983 return ret;
13984 }
13985 dev_priv->wm.distrust_bios_wm = false;
13986 intel_shared_dpll_swap_state(state);
13987 intel_atomic_track_fbs(state);
13988
13989 if (intel_state->modeset) {
13990 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13991 sizeof(intel_state->min_cdclk));
13992 memcpy(dev_priv->min_voltage_level,
13993 intel_state->min_voltage_level,
13994 sizeof(intel_state->min_voltage_level));
13995 dev_priv->active_crtcs = intel_state->active_crtcs;
13996 dev_priv->cdclk.force_min_cdclk =
13997 intel_state->cdclk.force_min_cdclk;
13998
13999 intel_cdclk_swap_state(intel_state);
14000 }
14001
14002 drm_atomic_state_get(state);
14003 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
14004
14005 i915_sw_fence_commit(&intel_state->commit_ready);
14006 if (nonblock && intel_state->modeset) {
14007 queue_work(dev_priv->modeset_wq, &state->commit_work);
14008 } else if (nonblock) {
14009 queue_work(system_unbound_wq, &state->commit_work);
14010 } else {
14011 if (intel_state->modeset)
14012 flush_workqueue(dev_priv->modeset_wq);
14013 intel_atomic_commit_tail(state);
14014 }
14015
14016 return 0;
14017 }
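
/*
 * Editor's note: the tail of intel_atomic_commit() encodes this
 * queueing policy:
 *
 *   nonblock && modeset  ->  dev_priv->modeset_wq
 *   nonblock             ->  system_unbound_wq
 *   blocking             ->  inline, flushing modeset_wq first if this
 *                            commit is itself a modeset
 *
 * so a blocking modeset never overtakes a still-queued nonblocking one,
 * and modesets serialize on the driver's dedicated workqueue.
 */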
14018
14019 static const struct drm_crtc_funcs intel_crtc_funcs = {
14020 .gamma_set = drm_atomic_helper_legacy_gamma_set,
14021 .set_config = drm_atomic_helper_set_config,
14022 .destroy = intel_crtc_destroy,
14023 .page_flip = drm_atomic_helper_page_flip,
14024 .atomic_duplicate_state = intel_crtc_duplicate_state,
14025 .atomic_destroy_state = intel_crtc_destroy_state,
14026 .set_crc_source = intel_crtc_set_crc_source,
14027 .verify_crc_source = intel_crtc_verify_crc_source,
14028 .get_crc_sources = intel_crtc_get_crc_sources,
14029 };
14030
14031 struct wait_rps_boost {
14032 struct wait_queue_entry wait;
14033
14034 struct drm_crtc *crtc;
14035 struct i915_request *request;
14036 };
14037
14038 static int do_rps_boost(struct wait_queue_entry *_wait,
14039 unsigned mode, int sync, void *key)
14040 {
14041 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14042 struct i915_request *rq = wait->request;
14043
14044 /*
14045 * If we missed the vblank, but the request is already running it
14046 * is reasonable to assume that it will complete before the next
14047 * vblank without our intervention, so leave RPS alone.
14048 */
14049 if (!i915_request_started(rq))
14050 gen6_rps_boost(rq);
14051 i915_request_put(rq);
14052
14053 drm_crtc_vblank_put(wait->crtc);
14054
14055 list_del(&wait->wait.entry);
14056 kfree(wait);
14057 return 1;
14058 }
14059
14060 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14061 struct dma_fence *fence)
14062 {
14063 struct wait_rps_boost *wait;
14064
14065 if (!dma_fence_is_i915(fence))
14066 return;
14067
14068 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14069 return;
14070
14071 if (drm_crtc_vblank_get(crtc))
14072 return;
14073
14074 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14075 if (!wait) {
14076 drm_crtc_vblank_put(crtc);
14077 return;
14078 }
14079
14080 wait->request = to_request(dma_fence_get(fence));
14081 wait->crtc = crtc;
14082
14083 wait->wait.func = do_rps_boost;
14084 wait->wait.flags = 0;
14085
14086 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14087 }
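
/*
 * Editor's note: add_rps_boost_after_vblank() arms a custom wait-queue
 * entry (wait->wait.func = do_rps_boost) on the crtc's vblank
 * waitqueue, so do_rps_boost() runs from the wake-up at the next
 * vblank: if the flip's render request still hasn't started by then,
 * the GPU clock is boosted to help the frame land before the following
 * vblank. The entry removes itself (list_del() plus kfree()), so each
 * boost fires at most once per flip.
 */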
14088
14089 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14090 {
14091 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14092 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14093 struct drm_framebuffer *fb = plane_state->base.fb;
14094 struct i915_vma *vma;
14095
14096 if (plane->id == PLANE_CURSOR &&
14097 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14098 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14099 const int align = intel_cursor_alignment(dev_priv);
14100 int err;
14101
14102 err = i915_gem_object_attach_phys(obj, align);
14103 if (err)
14104 return err;
14105 }
14106
14107 vma = intel_pin_and_fence_fb_obj(fb,
14108 &plane_state->view,
14109 intel_plane_uses_fence(plane_state),
14110 &plane_state->flags);
14111 if (IS_ERR(vma))
14112 return PTR_ERR(vma);
14113
14114 plane_state->vma = vma;
14115
14116 return 0;
14117 }
14118
14119 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14120 {
14121 struct i915_vma *vma;
14122
14123 vma = fetch_and_zero(&old_plane_state->vma);
14124 if (vma)
14125 intel_unpin_fb_vma(vma, old_plane_state->flags);
14126 }
14127
14128 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14129 {
14130 struct i915_sched_attr attr = {
14131 .priority = I915_PRIORITY_DISPLAY,
14132 };
14133
14134 i915_gem_object_wait_priority(obj, 0, &attr);
14135 }
14136
14137 /**
14138 * intel_prepare_plane_fb - Prepare fb for usage on plane
14139 * @plane: drm plane to prepare for
14140 * @new_state: the plane state being prepared
14141 *
14142 * Prepares a framebuffer for usage on a display plane. Generally this
14143 * involves pinning the underlying object and updating the frontbuffer tracking
14144 * bits. Some older platforms need special physical address handling for
14145 * cursor planes.
14146 *
14147 * Must be called with struct_mutex held.
14148 *
14149 * Returns 0 on success, negative error code on failure.
14150 */
14151 int
14152 intel_prepare_plane_fb(struct drm_plane *plane,
14153 struct drm_plane_state *new_state)
14154 {
14155 struct intel_atomic_state *intel_state =
14156 to_intel_atomic_state(new_state->state);
14157 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14158 struct drm_framebuffer *fb = new_state->fb;
14159 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14160 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14161 int ret;
14162
14163 if (old_obj) {
14164 struct drm_crtc_state *crtc_state =
14165 drm_atomic_get_new_crtc_state(new_state->state,
14166 plane->state->crtc);
14167
14168 /* Big Hammer, we also need to ensure that any pending
14169 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14170 * current scanout is retired before unpinning the old
14171 * framebuffer. Note that we rely on userspace rendering
14172 * into the buffer attached to the pipe they are waiting
14173 * on. If not, userspace generates a GPU hang with IPEHR
14174 * point to the MI_WAIT_FOR_EVENT.
14175 *
14176 * This should only fail upon a hung GPU, in which case we
14177 * can safely continue.
14178 */
14179 if (needs_modeset(crtc_state)) {
14180 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14181 old_obj->resv, NULL,
14182 false, 0,
14183 GFP_KERNEL);
14184 if (ret < 0)
14185 return ret;
14186 }
14187 }
14188
14189 if (new_state->fence) { /* explicit fencing */
14190 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14191 new_state->fence,
14192 I915_FENCE_TIMEOUT,
14193 GFP_KERNEL);
14194 if (ret < 0)
14195 return ret;
14196 }
14197
14198 if (!obj)
14199 return 0;
14200
14201 ret = i915_gem_object_pin_pages(obj);
14202 if (ret)
14203 return ret;
14204
14205 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14206 if (ret) {
14207 i915_gem_object_unpin_pages(obj);
14208 return ret;
14209 }
14210
14211 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14212
14213 mutex_unlock(&dev_priv->drm.struct_mutex);
14214 i915_gem_object_unpin_pages(obj);
14215 if (ret)
14216 return ret;
14217
14218 fb_obj_bump_render_priority(obj);
14219 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14220
14221 if (!new_state->fence) { /* implicit fencing */
14222 struct dma_fence *fence;
14223
14224 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14225 obj->resv, NULL,
14226 false, I915_FENCE_TIMEOUT,
14227 GFP_KERNEL);
14228 if (ret < 0)
14229 return ret;
14230
14231 fence = reservation_object_get_excl_rcu(obj->resv);
14232 if (fence) {
14233 add_rps_boost_after_vblank(new_state->crtc, fence);
14234 dma_fence_put(fence);
14235 }
14236 } else {
14237 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14238 }
14239
14240 /*
14241 * We declare pageflips to be interactive and so merit a small bias
14242 * towards upclocking to deliver the frame on time. By only changing
14243 * the RPS thresholds to sample more regularly and aim for higher
14244 * clocks, we can hopefully handle low-power workloads (like kodi)
14245 * that are not quite steady state, without resorting to forcing
14246 * maximum clocks following a vblank miss (see do_rps_boost()).
14247 */
14248 if (!intel_state->rps_interactive) {
14249 intel_rps_mark_interactive(dev_priv, true);
14250 intel_state->rps_interactive = true;
14251 }
14252
14253 return 0;
14254 }
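
/*
 * Editor's note: a hedged userspace sketch (not driver code) of the
 * explicit-fencing path handled above -- attaching a sync_file fd to a
 * plane through the generic "IN_FENCE_FD" property in an atomic commit,
 * using libdrm. The property id and fd values are hypothetical and
 * error handling is elided.
 */
#if 0	/* illustration only */
#include <xf86drm.h>
#include <xf86drmMode.h>

static int commit_with_in_fence(int fd, uint32_t plane_id,
				uint32_t in_fence_prop_id, int fence_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	/* new_state->fence above originates from a property like this */
	drmModeAtomicAddProperty(req, plane_id, in_fence_prop_id, fence_fd);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);

	return ret;
}
#endif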
14255
14256 /**
14257 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14258 * @plane: drm plane to clean up for
14259 * @old_state: the state from the previous modeset
14260 *
14261 * Cleans up a framebuffer that has just been removed from a plane.
14262 *
14263 * Must be called with struct_mutex held.
14264 */
14265 void
14266 intel_cleanup_plane_fb(struct drm_plane *plane,
14267 struct drm_plane_state *old_state)
14268 {
14269 struct intel_atomic_state *intel_state =
14270 to_intel_atomic_state(old_state->state);
14271 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14272
14273 if (intel_state->rps_interactive) {
14274 intel_rps_mark_interactive(dev_priv, false);
14275 intel_state->rps_interactive = false;
14276 }
14277
14278 /* Should only be called after a successful intel_prepare_plane_fb()! */
14279 mutex_lock(&dev_priv->drm.struct_mutex);
14280 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14281 mutex_unlock(&dev_priv->drm.struct_mutex);
14282 }
14283
14284 int
14285 skl_max_scale(const struct intel_crtc_state *crtc_state,
14286 u32 pixel_format)
14287 {
14288 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14289 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14290 int max_scale, mult;
14291 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14292
14293 if (!crtc_state->base.enable)
14294 return DRM_PLANE_HELPER_NO_SCALING;
14295
14296 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14297 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14298
14299 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14300 max_dotclk *= 2;
14301
14302 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14303 return DRM_PLANE_HELPER_NO_SCALING;
14304
14305 /*
14306 * The skl max scale is the lower of:
14307 * just under the 3x (RGB) / 2x (planar YUV) downscale limit
14308 * (the -1 keeps the 16.16 value strictly below it), or
14309 * cdclk/crtc_clock.
14310 */
14311 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14312 tmpclk1 = (1 << 16) * mult - 1;
14313 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14314 max_scale = min(tmpclk1, tmpclk2);
14315
14316 return max_scale;
14317 }
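
/*
 * Editor's note: a standalone sketch (not driver code) of the 16.16
 * fixed-point computation above, with hypothetical clock values, to
 * make the "lower of" comment concrete.
 */
#if 0	/* illustration only */
#include <stdio.h>

static int demo_skl_max_scale(int max_dotclk, int crtc_clock, int planar)
{
	int mult = planar ? 2 : 3;
	int tmpclk1 = (1 << 16) * mult - 1;	/* just below mult in 16.16 */
	int tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);

	return tmpclk2 < tmpclk1 ? tmpclk2 : tmpclk1;
}

int main(void)
{
	/* e.g. cdclk 540000 kHz, mode clock 148500 kHz: ratio ~3.63,
	 * so the result is capped at 0x2ffff (just under 3.0) */
	printf("0x%x\n", demo_skl_max_scale(540000, 148500, 0));
	return 0;
}
#endif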
14318
14319 static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14320 struct intel_crtc *crtc)
14321 {
14322 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14323 struct intel_crtc_state *old_crtc_state =
14324 intel_atomic_get_old_crtc_state(state, crtc);
14325 struct intel_crtc_state *new_crtc_state =
14326 intel_atomic_get_new_crtc_state(state, crtc);
14327 bool modeset = needs_modeset(&new_crtc_state->base);
14328
14329 /* Perform vblank evasion around commit operation */
14330 intel_pipe_update_start(new_crtc_state);
14331
14332 if (modeset)
14333 goto out;
14334
14335 if (new_crtc_state->base.color_mgmt_changed ||
14336 new_crtc_state->update_pipe)
14337 intel_color_commit(new_crtc_state);
14338
14339 if (new_crtc_state->update_pipe)
14340 intel_update_pipe_config(old_crtc_state, new_crtc_state);
14341 else if (INTEL_GEN(dev_priv) >= 9)
14342 skl_detach_scalers(new_crtc_state);
14343
14344 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14345 bdw_set_pipemisc(new_crtc_state);
14346
14347 out:
14348 if (dev_priv->display.atomic_update_watermarks)
14349 dev_priv->display.atomic_update_watermarks(state,
14350 new_crtc_state);
14351 }
14352
14353 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14354 struct intel_crtc_state *crtc_state)
14355 {
14356 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14357
14358 if (!IS_GEN(dev_priv, 2))
14359 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14360
14361 if (crtc_state->has_pch_encoder) {
14362 enum pipe pch_transcoder =
14363 intel_crtc_pch_transcoder(crtc);
14364
14365 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14366 }
14367 }
14368
14369 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14370 struct intel_crtc *crtc)
14371 {
14372 struct intel_crtc_state *old_crtc_state =
14373 intel_atomic_get_old_crtc_state(state, crtc);
14374 struct intel_crtc_state *new_crtc_state =
14375 intel_atomic_get_new_crtc_state(state, crtc);
14376
14377 intel_pipe_update_end(new_crtc_state);
14378
14379 if (new_crtc_state->update_pipe &&
14380 !needs_modeset(&new_crtc_state->base) &&
14381 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14382 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14383 }
14384
14385 /**
14386 * intel_plane_destroy - destroy a plane
14387 * @plane: plane to destroy
14388 *
14389 * Common destruction function for all types of planes (primary, cursor,
14390 * sprite).
14391 */
14392 void intel_plane_destroy(struct drm_plane *plane)
14393 {
14394 drm_plane_cleanup(plane);
14395 kfree(to_intel_plane(plane));
14396 }
14397
14398 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14399 u32 format, u64 modifier)
14400 {
14401 switch (modifier) {
14402 case DRM_FORMAT_MOD_LINEAR:
14403 case I915_FORMAT_MOD_X_TILED:
14404 break;
14405 default:
14406 return false;
14407 }
14408
14409 switch (format) {
14410 case DRM_FORMAT_C8:
14411 case DRM_FORMAT_RGB565:
14412 case DRM_FORMAT_XRGB1555:
14413 case DRM_FORMAT_XRGB8888:
14414 return modifier == DRM_FORMAT_MOD_LINEAR ||
14415 modifier == I915_FORMAT_MOD_X_TILED;
14416 default:
14417 return false;
14418 }
14419 }
14420
14421 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14422 u32 format, u64 modifier)
14423 {
14424 switch (modifier) {
14425 case DRM_FORMAT_MOD_LINEAR:
14426 case I915_FORMAT_MOD_X_TILED:
14427 break;
14428 default:
14429 return false;
14430 }
14431
14432 switch (format) {
14433 case DRM_FORMAT_C8:
14434 case DRM_FORMAT_RGB565:
14435 case DRM_FORMAT_XRGB8888:
14436 case DRM_FORMAT_XBGR8888:
14437 case DRM_FORMAT_XRGB2101010:
14438 case DRM_FORMAT_XBGR2101010:
14439 return modifier == DRM_FORMAT_MOD_LINEAR ||
14440 modifier == I915_FORMAT_MOD_X_TILED;
14441 default:
14442 return false;
14443 }
14444 }
14445
14446 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14447 u32 format, u64 modifier)
14448 {
14449 return modifier == DRM_FORMAT_MOD_LINEAR &&
14450 format == DRM_FORMAT_ARGB8888;
14451 }
14452
14453 static const struct drm_plane_funcs i965_plane_funcs = {
14454 .update_plane = drm_atomic_helper_update_plane,
14455 .disable_plane = drm_atomic_helper_disable_plane,
14456 .destroy = intel_plane_destroy,
14457 .atomic_get_property = intel_plane_atomic_get_property,
14458 .atomic_set_property = intel_plane_atomic_set_property,
14459 .atomic_duplicate_state = intel_plane_duplicate_state,
14460 .atomic_destroy_state = intel_plane_destroy_state,
14461 .format_mod_supported = i965_plane_format_mod_supported,
14462 };
14463
14464 static const struct drm_plane_funcs i8xx_plane_funcs = {
14465 .update_plane = drm_atomic_helper_update_plane,
14466 .disable_plane = drm_atomic_helper_disable_plane,
14467 .destroy = intel_plane_destroy,
14468 .atomic_get_property = intel_plane_atomic_get_property,
14469 .atomic_set_property = intel_plane_atomic_set_property,
14470 .atomic_duplicate_state = intel_plane_duplicate_state,
14471 .atomic_destroy_state = intel_plane_destroy_state,
14472 .format_mod_supported = i8xx_plane_format_mod_supported,
14473 };
14474
14475 static int
14476 intel_legacy_cursor_update(struct drm_plane *plane,
14477 struct drm_crtc *crtc,
14478 struct drm_framebuffer *fb,
14479 int crtc_x, int crtc_y,
14480 unsigned int crtc_w, unsigned int crtc_h,
14481 u32 src_x, u32 src_y,
14482 u32 src_w, u32 src_h,
14483 struct drm_modeset_acquire_ctx *ctx)
14484 {
14485 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14486 int ret;
14487 struct drm_plane_state *old_plane_state, *new_plane_state;
14488 struct intel_plane *intel_plane = to_intel_plane(plane);
14489 struct drm_framebuffer *old_fb;
14490 struct intel_crtc_state *crtc_state =
14491 to_intel_crtc_state(crtc->state);
14492 struct intel_crtc_state *new_crtc_state;
14493
14494 /*
14495 * When the crtc is inactive or there is a modeset or pipe update
14496 * pending, wait for it to complete in the slowpath.
14497 */
14498 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14499 crtc_state->update_pipe)
14500 goto slow;
14501
14502 old_plane_state = plane->state;
14503 /*
14504 * Don't do an async update if there is an outstanding commit modifying
14505 * the plane. This prevents our async update's changes from getting
14506 * overridden by a previous synchronous update's state.
14507 */
14508 if (old_plane_state->commit &&
14509 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14510 goto slow;
14511
14512 /*
14513 * If any parameters change that may affect watermarks,
14514 * take the slowpath. Only changing fb or position should be
14515 * in the fastpath.
14516 */
14517 if (old_plane_state->crtc != crtc ||
14518 old_plane_state->src_w != src_w ||
14519 old_plane_state->src_h != src_h ||
14520 old_plane_state->crtc_w != crtc_w ||
14521 old_plane_state->crtc_h != crtc_h ||
14522 !old_plane_state->fb != !fb)
14523 goto slow;
14524
14525 new_plane_state = intel_plane_duplicate_state(plane);
14526 if (!new_plane_state)
14527 return -ENOMEM;
14528
14529 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14530 if (!new_crtc_state) {
14531 ret = -ENOMEM;
14532 goto out_free;
14533 }
14534
14535 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14536
14537 new_plane_state->src_x = src_x;
14538 new_plane_state->src_y = src_y;
14539 new_plane_state->src_w = src_w;
14540 new_plane_state->src_h = src_h;
14541 new_plane_state->crtc_x = crtc_x;
14542 new_plane_state->crtc_y = crtc_y;
14543 new_plane_state->crtc_w = crtc_w;
14544 new_plane_state->crtc_h = crtc_h;
14545
14546 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14547 to_intel_plane_state(old_plane_state),
14548 to_intel_plane_state(new_plane_state));
14549 if (ret)
14550 goto out_free;
14551
14552 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14553 if (ret)
14554 goto out_free;
14555
14556 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14557 if (ret)
14558 goto out_unlock;
14559
14560 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14561
14562 old_fb = old_plane_state->fb;
14563 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14564 intel_plane->frontbuffer_bit);
14565
14566 /* Swap plane state */
14567 plane->state = new_plane_state;
14568
14569 /*
14570 * We cannot swap crtc_state as it may be in use by an atomic commit or
14571 * page flip that's running simultaneously. If we swap crtc_state and
14572 * destroy the old state, we will cause a use-after-free there.
14573 *
14574 * Only update active_planes, which is needed for our internal
14575 * bookkeeping. Either value will do the right thing when updating
14576 * planes atomically. If the cursor was part of the atomic update then
14577 * we would have taken the slowpath.
14578 */
14579 crtc_state->active_planes = new_crtc_state->active_planes;
14580
14581 if (plane->state->visible)
14582 intel_update_plane(intel_plane, crtc_state,
14583 to_intel_plane_state(plane->state));
14584 else
14585 intel_disable_plane(intel_plane, crtc_state);
14586
14587 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14588
14589 out_unlock:
14590 mutex_unlock(&dev_priv->drm.struct_mutex);
14591 out_free:
14592 if (new_crtc_state)
14593 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14594 if (ret)
14595 intel_plane_destroy_state(plane, new_plane_state);
14596 else
14597 intel_plane_destroy_state(plane, old_plane_state);
14598 return ret;
14599
14600 slow:
14601 return drm_atomic_helper_update_plane(plane, crtc, fb,
14602 crtc_x, crtc_y, crtc_w, crtc_h,
14603 src_x, src_y, src_w, src_h, ctx);
14604 }
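
/*
 * Editor's note: a hedged sketch (not driver code) of the legacy
 * userspace calls that reach this hook via libdrm. The crtc id and
 * coordinates are hypothetical.
 */
#if 0	/* illustration only */
#include <xf86drmMode.h>

static void nudge_cursor(int fd, uint32_t crtc_id, int x, int y)
{
	/* position-only change: a candidate for the fastpath above */
	drmModeMoveCursor(fd, crtc_id, x, y);
}
#endif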
14605
14606 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14607 .update_plane = intel_legacy_cursor_update,
14608 .disable_plane = drm_atomic_helper_disable_plane,
14609 .destroy = intel_plane_destroy,
14610 .atomic_get_property = intel_plane_atomic_get_property,
14611 .atomic_set_property = intel_plane_atomic_set_property,
14612 .atomic_duplicate_state = intel_plane_duplicate_state,
14613 .atomic_destroy_state = intel_plane_destroy_state,
14614 .format_mod_supported = intel_cursor_format_mod_supported,
14615 };
14616
14617 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14618 enum i9xx_plane_id i9xx_plane)
14619 {
14620 if (!HAS_FBC(dev_priv))
14621 return false;
14622
14623 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14624 return i9xx_plane == PLANE_A; /* tied to pipe A */
14625 else if (IS_IVYBRIDGE(dev_priv))
14626 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14627 i9xx_plane == PLANE_C;
14628 else if (INTEL_GEN(dev_priv) >= 4)
14629 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14630 else
14631 return i9xx_plane == PLANE_A;
14632 }
14633
14634 static struct intel_plane *
14635 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14636 {
14637 struct intel_plane *plane;
14638 const struct drm_plane_funcs *plane_funcs;
14639 unsigned int supported_rotations;
14640 unsigned int possible_crtcs;
14641 const u64 *modifiers;
14642 const u32 *formats;
14643 int num_formats;
14644 int ret;
14645
14646 if (INTEL_GEN(dev_priv) >= 9)
14647 return skl_universal_plane_create(dev_priv, pipe,
14648 PLANE_PRIMARY);
14649
14650 plane = intel_plane_alloc();
14651 if (IS_ERR(plane))
14652 return plane;
14653
14654 plane->pipe = pipe;
14655 /*
14656 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14657 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14658 */
14659 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14660 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14661 else
14662 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14663 plane->id = PLANE_PRIMARY;
14664 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14665
14666 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14667 if (plane->has_fbc) {
14668 struct intel_fbc *fbc = &dev_priv->fbc;
14669
14670 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14671 }
14672
14673 if (INTEL_GEN(dev_priv) >= 4) {
14674 formats = i965_primary_formats;
14675 num_formats = ARRAY_SIZE(i965_primary_formats);
14676 modifiers = i9xx_format_modifiers;
14677
14678 plane->max_stride = i9xx_plane_max_stride;
14679 plane->update_plane = i9xx_update_plane;
14680 plane->disable_plane = i9xx_disable_plane;
14681 plane->get_hw_state = i9xx_plane_get_hw_state;
14682 plane->check_plane = i9xx_plane_check;
14683
14684 plane_funcs = &i965_plane_funcs;
14685 } else {
14686 formats = i8xx_primary_formats;
14687 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14688 modifiers = i9xx_format_modifiers;
14689
14690 plane->max_stride = i9xx_plane_max_stride;
14691 plane->update_plane = i9xx_update_plane;
14692 plane->disable_plane = i9xx_disable_plane;
14693 plane->get_hw_state = i9xx_plane_get_hw_state;
14694 plane->check_plane = i9xx_plane_check;
14695
14696 plane_funcs = &i8xx_plane_funcs;
14697 }
14698
14699 possible_crtcs = BIT(pipe);
14700
14701 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14702 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14703 possible_crtcs, plane_funcs,
14704 formats, num_formats, modifiers,
14705 DRM_PLANE_TYPE_PRIMARY,
14706 "primary %c", pipe_name(pipe));
14707 else
14708 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14709 possible_crtcs, plane_funcs,
14710 formats, num_formats, modifiers,
14711 DRM_PLANE_TYPE_PRIMARY,
14712 "plane %c",
14713 plane_name(plane->i9xx_plane));
14714 if (ret)
14715 goto fail;
14716
14717 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14718 supported_rotations =
14719 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14720 DRM_MODE_REFLECT_X;
14721 } else if (INTEL_GEN(dev_priv) >= 4) {
14722 supported_rotations =
14723 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14724 } else {
14725 supported_rotations = DRM_MODE_ROTATE_0;
14726 }
14727
14728 if (INTEL_GEN(dev_priv) >= 4)
14729 drm_plane_create_rotation_property(&plane->base,
14730 DRM_MODE_ROTATE_0,
14731 supported_rotations);
14732
14733 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14734
14735 return plane;
14736
14737 fail:
14738 intel_plane_free(plane);
14739
14740 return ERR_PTR(ret);
14741 }
14742
14743 static struct intel_plane *
14744 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14745 enum pipe pipe)
14746 {
14747 unsigned int possible_crtcs;
14748 struct intel_plane *cursor;
14749 int ret;
14750
14751 cursor = intel_plane_alloc();
14752 if (IS_ERR(cursor))
14753 return cursor;
14754
14755 cursor->pipe = pipe;
14756 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14757 cursor->id = PLANE_CURSOR;
14758 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14759
14760 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14761 cursor->max_stride = i845_cursor_max_stride;
14762 cursor->update_plane = i845_update_cursor;
14763 cursor->disable_plane = i845_disable_cursor;
14764 cursor->get_hw_state = i845_cursor_get_hw_state;
14765 cursor->check_plane = i845_check_cursor;
14766 } else {
14767 cursor->max_stride = i9xx_cursor_max_stride;
14768 cursor->update_plane = i9xx_update_cursor;
14769 cursor->disable_plane = i9xx_disable_cursor;
14770 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14771 cursor->check_plane = i9xx_check_cursor;
14772 }
14773
14774 cursor->cursor.base = ~0;
14775 cursor->cursor.cntl = ~0;
14776
14777 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14778 cursor->cursor.size = ~0;
14779
14780 possible_crtcs = BIT(pipe);
14781
14782 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14783 possible_crtcs, &intel_cursor_plane_funcs,
14784 intel_cursor_formats,
14785 ARRAY_SIZE(intel_cursor_formats),
14786 cursor_format_modifiers,
14787 DRM_PLANE_TYPE_CURSOR,
14788 "cursor %c", pipe_name(pipe));
14789 if (ret)
14790 goto fail;
14791
14792 if (INTEL_GEN(dev_priv) >= 4)
14793 drm_plane_create_rotation_property(&cursor->base,
14794 DRM_MODE_ROTATE_0,
14795 DRM_MODE_ROTATE_0 |
14796 DRM_MODE_ROTATE_180);
14797
14798 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14799
14800 return cursor;
14801
14802 fail:
14803 intel_plane_free(cursor);
14804
14805 return ERR_PTR(ret);
14806 }
14807
14808 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14809 struct intel_crtc_state *crtc_state)
14810 {
14811 struct intel_crtc_scaler_state *scaler_state =
14812 &crtc_state->scaler_state;
14813 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14814 int i;
14815
14816 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14817 if (!crtc->num_scalers)
14818 return;
14819
14820 for (i = 0; i < crtc->num_scalers; i++) {
14821 struct intel_scaler *scaler = &scaler_state->scalers[i];
14822
14823 scaler->in_use = 0;
14824 scaler->mode = 0;
14825 }
14826
14827 scaler_state->scaler_id = -1;
14828 }
14829
14830 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14831 {
14832 struct intel_crtc *intel_crtc;
14833 struct intel_crtc_state *crtc_state = NULL;
14834 struct intel_plane *primary = NULL;
14835 struct intel_plane *cursor = NULL;
14836 int sprite, ret;
14837
14838 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14839 if (!intel_crtc)
14840 return -ENOMEM;
14841
14842 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14843 if (!crtc_state) {
14844 ret = -ENOMEM;
14845 goto fail;
14846 }
14847 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
14848 intel_crtc->config = crtc_state;
14849
14850 primary = intel_primary_plane_create(dev_priv, pipe);
14851 if (IS_ERR(primary)) {
14852 ret = PTR_ERR(primary);
14853 goto fail;
14854 }
14855 intel_crtc->plane_ids_mask |= BIT(primary->id);
14856
14857 for_each_sprite(dev_priv, pipe, sprite) {
14858 struct intel_plane *plane;
14859
14860 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14861 if (IS_ERR(plane)) {
14862 ret = PTR_ERR(plane);
14863 goto fail;
14864 }
14865 intel_crtc->plane_ids_mask |= BIT(plane->id);
14866 }
14867
14868 cursor = intel_cursor_plane_create(dev_priv, pipe);
14869 if (IS_ERR(cursor)) {
14870 ret = PTR_ERR(cursor);
14871 goto fail;
14872 }
14873 intel_crtc->plane_ids_mask |= BIT(cursor->id);
14874
14875 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14876 &primary->base, &cursor->base,
14877 &intel_crtc_funcs,
14878 "pipe %c", pipe_name(pipe));
14879 if (ret)
14880 goto fail;
14881
14882 intel_crtc->pipe = pipe;
14883
14884 /* initialize shared scalers */
14885 intel_crtc_init_scalers(intel_crtc, crtc_state);
14886
14887 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14888 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14889 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14890
14891 if (INTEL_GEN(dev_priv) < 9) {
14892 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14893
14894 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14895 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14896 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14897 }
14898
14899 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14900
14901 intel_color_init(intel_crtc);
14902
14903 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14904
14905 return 0;
14906
14907 fail:
14908 /*
14909 * drm_mode_config_cleanup() will free up any
14910 * crtcs/planes already initialized.
14911 */
14912 kfree(crtc_state);
14913 kfree(intel_crtc);
14914
14915 return ret;
14916 }
14917
14918 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14919 struct drm_file *file)
14920 {
14921 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14922 struct drm_crtc *drmmode_crtc;
14923 struct intel_crtc *crtc;
14924
14925 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14926 if (!drmmode_crtc)
14927 return -ENOENT;
14928
14929 crtc = to_intel_crtc(drmmode_crtc);
14930 pipe_from_crtc_id->pipe = crtc->pipe;
14931
14932 return 0;
14933 }
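
/*
 * Editor's note: a hedged userspace sketch (not driver code) showing
 * how this ioctl is reached with the uapi struct from i915_drm.h. The
 * fd and crtc id are hypothetical.
 */
#if 0	/* illustration only */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int pipe_from_crtc_id(int fd, unsigned int crtc_id)
{
	struct drm_i915_get_pipe_from_crtc_id arg = { .crtc_id = crtc_id };

	if (ioctl(fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &arg))
		return -1;	/* unknown CRTC ids fail with ENOENT */

	return arg.pipe;	/* 0 = pipe A, 1 = pipe B, ... */
}
#endif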
14934
14935 static int intel_encoder_clones(struct intel_encoder *encoder)
14936 {
14937 struct drm_device *dev = encoder->base.dev;
14938 struct intel_encoder *source_encoder;
14939 int index_mask = 0;
14940 int entry = 0;
14941
14942 for_each_intel_encoder(dev, source_encoder) {
14943 if (encoders_cloneable(encoder, source_encoder))
14944 index_mask |= (1 << entry);
14945
14946 entry++;
14947 }
14948
14949 return index_mask;
14950 }
14951
14952 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14953 {
14954 if (!IS_MOBILE(dev_priv))
14955 return false;
14956
14957 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14958 return false;
14959
14960 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14961 return false;
14962
14963 return true;
14964 }
14965
14966 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14967 {
14968 if (INTEL_GEN(dev_priv) >= 9)
14969 return false;
14970
14971 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14972 return false;
14973
14974 if (HAS_PCH_LPT_H(dev_priv) &&
14975 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14976 return false;
14977
14978 /* DDI E can't be used if DDI A requires 4 lanes */
14979 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14980 return false;
14981
14982 if (!dev_priv->vbt.int_crt_support)
14983 return false;
14984
14985 return true;
14986 }
14987
14988 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14989 {
14990 int pps_num;
14991 int pps_idx;
14992
14993 if (HAS_DDI(dev_priv))
14994 return;
14995 /*
14996 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14997 * everywhere registers can be write-protected.
14998 */
14999 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15000 pps_num = 2;
15001 else
15002 pps_num = 1;
15003
15004 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15005 u32 val = I915_READ(PP_CONTROL(pps_idx));
15006
15007 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15008 I915_WRITE(PP_CONTROL(pps_idx), val);
15009 }
15010 }
15011
15012 static void intel_pps_init(struct drm_i915_private *dev_priv)
15013 {
15014 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15015 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15016 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15017 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15018 else
15019 dev_priv->pps_mmio_base = PPS_BASE;
15020
15021 intel_pps_unlock_regs_wa(dev_priv);
15022 }
15023
15024 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
15025 {
15026 struct intel_encoder *encoder;
15027 bool dpd_is_edp = false;
15028
15029 intel_pps_init(dev_priv);
15030
15031 if (!HAS_DISPLAY(dev_priv))
15032 return;
15033
15034 if (IS_ELKHARTLAKE(dev_priv)) {
15035 intel_ddi_init(dev_priv, PORT_A);
15036 intel_ddi_init(dev_priv, PORT_B);
15037 intel_ddi_init(dev_priv, PORT_C);
15038 icl_dsi_init(dev_priv);
15039 } else if (INTEL_GEN(dev_priv) >= 11) {
15040 intel_ddi_init(dev_priv, PORT_A);
15041 intel_ddi_init(dev_priv, PORT_B);
15042 intel_ddi_init(dev_priv, PORT_C);
15043 intel_ddi_init(dev_priv, PORT_D);
15044 intel_ddi_init(dev_priv, PORT_E);
15045 /*
15046 * On some ICL SKUs port F is not present. There are no strap
15047 * bits for this, so rely on the VBT.
15048 * Work around broken VBTs on SKUs known to have no port F.
15049 */
15050 if (IS_ICL_WITH_PORT_F(dev_priv) &&
15051 intel_bios_is_port_present(dev_priv, PORT_F))
15052 intel_ddi_init(dev_priv, PORT_F);
15053
15054 icl_dsi_init(dev_priv);
15055 } else if (IS_GEN9_LP(dev_priv)) {
15056 /*
15057 * FIXME: Broxton doesn't support port detection via the
15058 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
15059 * detect the ports.
15060 */
15061 intel_ddi_init(dev_priv, PORT_A);
15062 intel_ddi_init(dev_priv, PORT_B);
15063 intel_ddi_init(dev_priv, PORT_C);
15064
15065 vlv_dsi_init(dev_priv);
15066 } else if (HAS_DDI(dev_priv)) {
15067 int found;
15068
15069 if (intel_ddi_crt_present(dev_priv))
15070 intel_crt_init(dev_priv);
15071
15072 /*
15073 * Haswell uses DDI functions to detect digital outputs.
15074 * On SKL pre-D0 the strap isn't connected, so we assume
15075 * it's there.
15076 */
15077 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
15078 /* WaIgnoreDDIAStrap: skl */
15079 if (found || IS_GEN9_BC(dev_priv))
15080 intel_ddi_init(dev_priv, PORT_A);
15081
15082 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
15083 * register */
15084 found = I915_READ(SFUSE_STRAP);
15085
15086 if (found & SFUSE_STRAP_DDIB_DETECTED)
15087 intel_ddi_init(dev_priv, PORT_B);
15088 if (found & SFUSE_STRAP_DDIC_DETECTED)
15089 intel_ddi_init(dev_priv, PORT_C);
15090 if (found & SFUSE_STRAP_DDID_DETECTED)
15091 intel_ddi_init(dev_priv, PORT_D);
15092 if (found & SFUSE_STRAP_DDIF_DETECTED)
15093 intel_ddi_init(dev_priv, PORT_F);
15094 /*
15095 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
15096 */
15097 if (IS_GEN9_BC(dev_priv) &&
15098 intel_bios_is_port_present(dev_priv, PORT_E))
15099 intel_ddi_init(dev_priv, PORT_E);
15100
15101 } else if (HAS_PCH_SPLIT(dev_priv)) {
15102 int found;
15103
15104 /*
15105 * intel_edp_init_connector() depends on this completing first,
15106 * to prevent the registration of both eDP and LVDS and the
15107 * incorrect sharing of the PPS.
15108 */
15109 intel_lvds_init(dev_priv);
15110 intel_crt_init(dev_priv);
15111
15112 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
15113
15114 if (ilk_has_edp_a(dev_priv))
15115 intel_dp_init(dev_priv, DP_A, PORT_A);
15116
15117 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
15118 /* PCH SDVOB multiplex with HDMIB */
15119 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
15120 if (!found)
15121 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
15122 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
15123 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
15124 }
15125
15126 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
15127 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
15128
15129 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
15130 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
15131
15132 if (I915_READ(PCH_DP_C) & DP_DETECTED)
15133 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
15134
15135 if (I915_READ(PCH_DP_D) & DP_DETECTED)
15136 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
15137 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15138 bool has_edp, has_port;
15139
15140 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
15141 intel_crt_init(dev_priv);
15142
15143 /*
15144 * The DP_DETECTED bit is the latched state of the DDC
15145 * SDA pin at boot. However since eDP doesn't require DDC
15146 * (no way to plug in a DP->HDMI dongle) the DDC pins for
15147 * eDP ports may have been muxed to an alternate function.
15148 * Thus we can't rely on the DP_DETECTED bit alone to detect
15149 * eDP ports. Consult the VBT as well as DP_DETECTED to
15150 * detect eDP ports.
15151 *
15152 * Sadly the straps seem to be missing sometimes even for HDMI
15153 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
15154 * and VBT for the presence of the port. Additionally we can't
15155 * trust the port type the VBT declares as we've seen at least
15156 * HDMI ports that the VBT claims are DP or eDP.
15157 */
15158 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
15159 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
15160 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
15161 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
15162 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
15163 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
15164
15165 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
15166 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
15167 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
15168 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
15169 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
15170 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
15171
15172 if (IS_CHERRYVIEW(dev_priv)) {
15173 /*
15174 * eDP not supported on port D,
15175 * so no need to worry about it
15176 */
15177 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
15178 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
15179 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
15180 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
15181 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
15182 }
15183
15184 vlv_dsi_init(dev_priv);
15185 } else if (IS_PINEVIEW(dev_priv)) {
15186 intel_lvds_init(dev_priv);
15187 intel_crt_init(dev_priv);
15188 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
15189 bool found = false;
15190
15191 if (IS_MOBILE(dev_priv))
15192 intel_lvds_init(dev_priv);
15193
15194 intel_crt_init(dev_priv);
15195
15196 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15197 DRM_DEBUG_KMS("probing SDVOB\n");
15198 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
15199 if (!found && IS_G4X(dev_priv)) {
15200 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
15201 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
15202 }
15203
15204 if (!found && IS_G4X(dev_priv))
15205 intel_dp_init(dev_priv, DP_B, PORT_B);
15206 }
15207
15208 /* Before G4X, SDVOC doesn't have its own detect register */
15209
15210 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15211 DRM_DEBUG_KMS("probing SDVOC\n");
15212 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
15213 }
15214
15215 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
15216
15217 if (IS_G4X(dev_priv)) {
15218 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
15219 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
15220 }
15221 if (IS_G4X(dev_priv))
15222 intel_dp_init(dev_priv, DP_C, PORT_C);
15223 }
15224
15225 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
15226 intel_dp_init(dev_priv, DP_D, PORT_D);
15227
15228 if (SUPPORTS_TV(dev_priv))
15229 intel_tv_init(dev_priv);
15230 } else if (IS_GEN(dev_priv, 2)) {
15231 if (IS_I85X(dev_priv))
15232 intel_lvds_init(dev_priv);
15233
15234 intel_crt_init(dev_priv);
15235 intel_dvo_init(dev_priv);
15236 }
15237
15238 intel_psr_init(dev_priv);
15239
15240 for_each_intel_encoder(&dev_priv->drm, encoder) {
15241 encoder->base.possible_crtcs = encoder->crtc_mask;
15242 encoder->base.possible_clones =
15243 intel_encoder_clones(encoder);
15244 }
15245
15246 intel_init_pch_refclk(dev_priv);
15247
15248 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
15249 }
15250
15251 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15252 {
15253 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15254 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15255
15256 drm_framebuffer_cleanup(fb);
15257
15258 i915_gem_object_lock(obj);
15259 WARN_ON(!obj->framebuffer_references--);
15260 i915_gem_object_unlock(obj);
15261
15262 i915_gem_object_put(obj);
15263
15264 kfree(intel_fb);
15265 }
15266
15267 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15268 struct drm_file *file,
15269 unsigned int *handle)
15270 {
15271 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15272
15273 if (obj->userptr.mm) {
15274 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15275 return -EINVAL;
15276 }
15277
15278 return drm_gem_handle_create(file, &obj->base, handle);
15279 }
15280
15281 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15282 struct drm_file *file,
15283 unsigned flags, unsigned color,
15284 struct drm_clip_rect *clips,
15285 unsigned num_clips)
15286 {
15287 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15288
15289 i915_gem_object_flush_if_display(obj);
15290 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15291
15292 return 0;
15293 }
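
/*
 * Editor's note: a hedged sketch (not driver code) of the userspace
 * side of the ->dirty hook above, via libdrm's DIRTYFB wrapper. The
 * fb id is hypothetical.
 */
#if 0	/* illustration only */
#include <xf86drmMode.h>

static void flush_whole_fb(int fd, uint32_t fb_id)
{
	/* no clip rects: mark the entire framebuffer dirty */
	drmModeDirtyFB(fd, fb_id, NULL, 0);
}
#endif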
15294
15295 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15296 .destroy = intel_user_framebuffer_destroy,
15297 .create_handle = intel_user_framebuffer_create_handle,
15298 .dirty = intel_user_framebuffer_dirty,
15299 };
15300
15301 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15302 struct drm_i915_gem_object *obj,
15303 struct drm_mode_fb_cmd2 *mode_cmd)
15304 {
15305 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15306 struct drm_framebuffer *fb = &intel_fb->base;
15307 u32 max_stride;
15308 unsigned int tiling, stride;
15309 int ret = -EINVAL;
15310 int i;
15311
15312 i915_gem_object_lock(obj);
15313 obj->framebuffer_references++;
15314 tiling = i915_gem_object_get_tiling(obj);
15315 stride = i915_gem_object_get_stride(obj);
15316 i915_gem_object_unlock(obj);
15317
15318 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15319 /*
15320 * If there's a fence, enforce that
15321 * the fb modifier and tiling mode match.
15322 */
15323 if (tiling != I915_TILING_NONE &&
15324 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15325 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15326 goto err;
15327 }
15328 } else {
15329 if (tiling == I915_TILING_X) {
15330 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15331 } else if (tiling == I915_TILING_Y) {
15332 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15333 goto err;
15334 }
15335 }
15336
15337 if (!drm_any_plane_has_format(&dev_priv->drm,
15338 mode_cmd->pixel_format,
15339 mode_cmd->modifier[0])) {
15340 struct drm_format_name_buf format_name;
15341
15342 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15343 drm_get_format_name(mode_cmd->pixel_format,
15344 &format_name),
15345 mode_cmd->modifier[0]);
15346 goto err;
15347 }
15348
15349 /*
15350 * gen2/3 display engine uses the fence if present,
15351 * so the tiling mode must match the fb modifier exactly.
15352 */
15353 if (INTEL_GEN(dev_priv) < 4 &&
15354 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15355 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15356 goto err;
15357 }
15358
15359 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
15360 mode_cmd->modifier[0]);
15361 if (mode_cmd->pitches[0] > max_stride) {
15362 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15363 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15364 "tiled" : "linear",
15365 mode_cmd->pitches[0], max_stride);
15366 goto err;
15367 }
15368
15369 /*
15370 * If there's a fence, enforce that
15371 * the fb pitch and fence stride match.
15372 */
15373 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15374 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15375 mode_cmd->pitches[0], stride);
15376 goto err;
15377 }
15378
15379 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15380 if (mode_cmd->offsets[0] != 0)
15381 goto err;
15382
15383 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15384
15385 for (i = 0; i < fb->format->num_planes; i++) {
15386 u32 stride_alignment;
15387
15388 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15389 DRM_DEBUG_KMS("bad plane %d handle\n", i);
15390 goto err;
15391 }
15392
15393 stride_alignment = intel_fb_stride_alignment(fb, i);
15394
15395 /*
15396 * Display WA #0531: skl,bxt,kbl,glk
15397 *
15398 * Render decompression and plane width > 3840
15399 * combined with horizontal panning requires the
15400 * plane stride to be a multiple of 4. We'll just
15401 * require the entire fb to accommodate that to avoid
15402 * potential runtime errors at plane configuration time.
15403 */
15404 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15405 is_ccs_modifier(fb->modifier))
15406 stride_alignment *= 4;
15407
15408 if (fb->pitches[i] & (stride_alignment - 1)) {
15409 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15410 i, fb->pitches[i], stride_alignment);
15411 goto err;
15412 }
15413
15414 fb->obj[i] = &obj->base;
15415 }
15416
15417 ret = intel_fill_fb_info(dev_priv, fb);
15418 if (ret)
15419 goto err;
15420
15421 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15422 if (ret) {
15423 DRM_ERROR("framebuffer init failed %d\n", ret);
15424 goto err;
15425 }
15426
15427 return 0;
15428
15429 err:
15430 i915_gem_object_lock(obj);
15431 obj->framebuffer_references--;
15432 i915_gem_object_unlock(obj);
15433 return ret;
15434 }
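
/*
 * Editor's note: a hedged userspace sketch (not driver code) of an
 * addfb2 call with an explicit modifier, which exercises the
 * DRM_MODE_FB_MODIFIERS branch above. Geometry, pitch and handle
 * values are hypothetical.
 */
#if 0	/* illustration only */
#include <drm_fourcc.h>
#include <xf86drmMode.h>

static int add_xtiled_fb(int fd, uint32_t bo_handle, uint32_t *fb_id)
{
	uint32_t handles[4] = { bo_handle };
	uint32_t pitches[4] = { 4096 };	/* must match the fence stride */
	uint32_t offsets[4] = { 0 };	/* nonzero offsets are rejected */
	uint64_t modifiers[4] = { I915_FORMAT_MOD_X_TILED };

	return drmModeAddFB2WithModifiers(fd, 1024, 768,
					  DRM_FORMAT_XRGB8888,
					  handles, pitches, offsets,
					  modifiers, fb_id,
					  DRM_MODE_FB_MODIFIERS);
}
#endif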
15435
15436 static struct drm_framebuffer *
15437 intel_user_framebuffer_create(struct drm_device *dev,
15438 struct drm_file *filp,
15439 const struct drm_mode_fb_cmd2 *user_mode_cmd)
15440 {
15441 struct drm_framebuffer *fb;
15442 struct drm_i915_gem_object *obj;
15443 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15444
15445 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15446 if (!obj)
15447 return ERR_PTR(-ENOENT);
15448
15449 fb = intel_framebuffer_create(obj, &mode_cmd);
15450 if (IS_ERR(fb))
15451 i915_gem_object_put(obj);
15452
15453 return fb;
15454 }
15455
15456 static void intel_atomic_state_free(struct drm_atomic_state *state)
15457 {
15458 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15459
15460 drm_atomic_state_default_release(state);
15461
15462 i915_sw_fence_fini(&intel_state->commit_ready);
15463
15464 kfree(state);
15465 }
15466
15467 static enum drm_mode_status
15468 intel_mode_valid(struct drm_device *dev,
15469 const struct drm_display_mode *mode)
15470 {
15471 struct drm_i915_private *dev_priv = to_i915(dev);
15472 int hdisplay_max, htotal_max;
15473 int vdisplay_max, vtotal_max;
15474
15475 /*
15476 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15477 * of DBLSCAN modes to the output's mode list when they detect
15478 * the scaling mode property on the connector. And they don't
15479 * ask the kernel to validate those modes in any way until
15480 * modeset time at which point the client gets a protocol error.
15481 * So in order to not upset those clients we silently ignore the
15482 * DBLSCAN flag on such connectors. For other connectors we will
15483 * reject modes with the DBLSCAN flag in encoder->compute_config().
15484 * And we always reject DBLSCAN modes in connector->mode_valid()
15485 * as we never want such modes on the connector's mode list.
15486 */
15487
15488 if (mode->vscan > 1)
15489 return MODE_NO_VSCAN;
15490
15491 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15492 return MODE_H_ILLEGAL;
15493
15494 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15495 DRM_MODE_FLAG_NCSYNC |
15496 DRM_MODE_FLAG_PCSYNC))
15497 return MODE_HSYNC;
15498
15499 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15500 DRM_MODE_FLAG_PIXMUX |
15501 DRM_MODE_FLAG_CLKDIV2))
15502 return MODE_BAD;
15503
15504 if (INTEL_GEN(dev_priv) >= 9 ||
15505 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15506 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15507 vdisplay_max = 4096;
15508 htotal_max = 8192;
15509 vtotal_max = 8192;
15510 } else if (INTEL_GEN(dev_priv) >= 3) {
15511 hdisplay_max = 4096;
15512 vdisplay_max = 4096;
15513 htotal_max = 8192;
15514 vtotal_max = 8192;
15515 } else {
15516 hdisplay_max = 2048;
15517 vdisplay_max = 2048;
15518 htotal_max = 4096;
15519 vtotal_max = 4096;
15520 }
15521
15522 if (mode->hdisplay > hdisplay_max ||
15523 mode->hsync_start > htotal_max ||
15524 mode->hsync_end > htotal_max ||
15525 mode->htotal > htotal_max)
15526 return MODE_H_ILLEGAL;
15527
15528 if (mode->vdisplay > vdisplay_max ||
15529 mode->vsync_start > vtotal_max ||
15530 mode->vsync_end > vtotal_max ||
15531 mode->vtotal > vtotal_max)
15532 return MODE_V_ILLEGAL;
15533
15534 return MODE_OK;
15535 }
15536
15537 static const struct drm_mode_config_funcs intel_mode_funcs = {
15538 .fb_create = intel_user_framebuffer_create,
15539 .get_format_info = intel_get_format_info,
15540 .output_poll_changed = intel_fbdev_output_poll_changed,
15541 .mode_valid = intel_mode_valid,
15542 .atomic_check = intel_atomic_check,
15543 .atomic_commit = intel_atomic_commit,
15544 .atomic_state_alloc = intel_atomic_state_alloc,
15545 .atomic_state_clear = intel_atomic_state_clear,
15546 .atomic_state_free = intel_atomic_state_free,
15547 };
15548
15549 /**
15550 * intel_init_display_hooks - initialize the display modesetting hooks
15551 * @dev_priv: device private
15552 */
15553 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15554 {
15555 intel_init_cdclk_hooks(dev_priv);
15556
15557 if (INTEL_GEN(dev_priv) >= 9) {
15558 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15559 dev_priv->display.get_initial_plane_config =
15560 skylake_get_initial_plane_config;
15561 dev_priv->display.crtc_compute_clock =
15562 haswell_crtc_compute_clock;
15563 dev_priv->display.crtc_enable = haswell_crtc_enable;
15564 dev_priv->display.crtc_disable = haswell_crtc_disable;
15565 } else if (HAS_DDI(dev_priv)) {
15566 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15567 dev_priv->display.get_initial_plane_config =
15568 i9xx_get_initial_plane_config;
15569 dev_priv->display.crtc_compute_clock =
15570 haswell_crtc_compute_clock;
15571 dev_priv->display.crtc_enable = haswell_crtc_enable;
15572 dev_priv->display.crtc_disable = haswell_crtc_disable;
15573 } else if (HAS_PCH_SPLIT(dev_priv)) {
15574 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15575 dev_priv->display.get_initial_plane_config =
15576 i9xx_get_initial_plane_config;
15577 dev_priv->display.crtc_compute_clock =
15578 ironlake_crtc_compute_clock;
15579 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15580 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15581 } else if (IS_CHERRYVIEW(dev_priv)) {
15582 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15583 dev_priv->display.get_initial_plane_config =
15584 i9xx_get_initial_plane_config;
15585 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15586 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15587 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15588 } else if (IS_VALLEYVIEW(dev_priv)) {
15589 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15590 dev_priv->display.get_initial_plane_config =
15591 i9xx_get_initial_plane_config;
15592 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15593 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15594 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15595 } else if (IS_G4X(dev_priv)) {
15596 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15597 dev_priv->display.get_initial_plane_config =
15598 i9xx_get_initial_plane_config;
15599 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15600 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15601 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15602 } else if (IS_PINEVIEW(dev_priv)) {
15603 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15604 dev_priv->display.get_initial_plane_config =
15605 i9xx_get_initial_plane_config;
15606 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15607 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15608 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15609 } else if (!IS_GEN(dev_priv, 2)) {
15610 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15611 dev_priv->display.get_initial_plane_config =
15612 i9xx_get_initial_plane_config;
15613 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15614 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15615 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15616 } else {
15617 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15618 dev_priv->display.get_initial_plane_config =
15619 i9xx_get_initial_plane_config;
15620 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15621 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15622 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15623 }
15624
15625 if (IS_GEN(dev_priv, 5)) {
15626 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15627 } else if (IS_GEN(dev_priv, 6)) {
15628 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15629 } else if (IS_IVYBRIDGE(dev_priv)) {
15630 /* FIXME: detect B0+ stepping and use auto training */
15631 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15632 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15633 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15634 }
15635
15636 if (INTEL_GEN(dev_priv) >= 9)
15637 dev_priv->display.update_crtcs = skl_update_crtcs;
15638 else
15639 dev_priv->display.update_crtcs = intel_update_crtcs;
15640 }
15641
15642 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15643 {
15644 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15645 return VLV_VGACNTRL;
15646 else if (INTEL_GEN(dev_priv) >= 5)
15647 return CPU_VGACNTRL;
15648 else
15649 return VGACNTRL;
15650 }
15651
15652 /* Disable the VGA plane that we never use */
15653 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15654 {
15655 struct pci_dev *pdev = dev_priv->drm.pdev;
15656 u8 sr1;
15657 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15658
15659 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15660 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
15661 outb(SR01, VGA_SR_INDEX);
15662 sr1 = inb(VGA_SR_DATA);
15663 outb(sr1 | 1<<5, VGA_SR_DATA);
15664 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15665 udelay(300);
15666
15667 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15668 POSTING_READ(vga_reg);
15669 }
15670
15671 void intel_modeset_init_hw(struct drm_device *dev)
15672 {
15673 struct drm_i915_private *dev_priv = to_i915(dev);
15674
15675 intel_update_cdclk(dev_priv);
15676 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15677 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15678 }
15679
15680 /*
15681 * Calculate what we think the watermarks should be for the state we've read
15682 * out of the hardware and then immediately program those watermarks so that
15683 * we ensure the hardware settings match our internal state.
15684 *
15685 * We can calculate what we think WMs should be by creating a duplicate of the
15686 * current state (which was constructed during hardware readout) and running it
15687 * through the atomic check code to calculate new watermark values in the
15688 * state object.
15689 */
15690 static void sanitize_watermarks(struct drm_device *dev)
15691 {
15692 struct drm_i915_private *dev_priv = to_i915(dev);
15693 struct drm_atomic_state *state;
15694 struct intel_atomic_state *intel_state;
15695 struct drm_crtc *crtc;
15696 struct drm_crtc_state *cstate;
15697 struct drm_modeset_acquire_ctx ctx;
15698 int ret;
15699 int i;
15700
15701 /* Only supported on platforms that use atomic watermark design */
15702 if (!dev_priv->display.optimize_watermarks)
15703 return;
15704
15705 /*
15706 * We need to hold connection_mutex before calling duplicate_state so
15707 * that the connector loop is protected.
15708 */
15709 drm_modeset_acquire_init(&ctx, 0);
15710 retry:
15711 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15712 if (ret == -EDEADLK) {
15713 drm_modeset_backoff(&ctx);
15714 goto retry;
15715 } else if (WARN_ON(ret)) {
15716 goto fail;
15717 }
15718
15719 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15720 if (WARN_ON(IS_ERR(state)))
15721 goto fail;
15722
15723 intel_state = to_intel_atomic_state(state);
15724
15725 /*
15726 * Hardware readout is the only time we don't want to calculate
15727 * intermediate watermarks (since we don't trust the current
15728 * watermarks).
15729 */
15730 if (!HAS_GMCH(dev_priv))
15731 intel_state->skip_intermediate_wm = true;
15732
15733 ret = intel_atomic_check(dev, state);
15734 if (ret) {
15735 /*
15736 * If we fail here, it means that the hardware appears to be
15737 * programmed in a way that shouldn't be possible, given our
15738 * understanding of watermark requirements. This might mean a
15739 * mistake in the hardware readout code or a mistake in the
15740 * watermark calculations for a given platform. Raise a WARN
15741 * so that this is noticeable.
15742 *
15743 * If this actually happens, we'll have to just leave the
15744 * BIOS-programmed watermarks untouched and hope for the best.
15745 */
15746 WARN(true, "Could not determine valid watermarks for inherited state\n");
15747 goto put_state;
15748 }
15749
15750 /* Write calculated watermark values back */
15751 for_each_new_crtc_in_state(state, crtc, cstate, i) {
15752 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15753
15754 cs->wm.need_postvbl_update = true;
15755 dev_priv->display.optimize_watermarks(intel_state, cs);
15756
15757 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15758 }
15759
15760 put_state:
15761 drm_atomic_state_put(state);
15762 fail:
15763 drm_modeset_drop_locks(&ctx);
15764 drm_modeset_acquire_fini(&ctx);
15765 }
15766
15767 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15768 {
15769 if (IS_GEN(dev_priv, 5)) {
15770 u32 fdi_pll_clk =
15771 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15772
15773 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15774 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15775 dev_priv->fdi_pll_freq = 270000;
15776 } else {
15777 return;
15778 }
15779
15780 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15781 }
15782
15783 static int intel_initial_commit(struct drm_device *dev)
15784 {
15785 struct drm_atomic_state *state = NULL;
15786 struct drm_modeset_acquire_ctx ctx;
15787 struct drm_crtc *crtc;
15788 struct drm_crtc_state *crtc_state;
15789 int ret = 0;
15790
15791 state = drm_atomic_state_alloc(dev);
15792 if (!state)
15793 return -ENOMEM;
15794
15795 drm_modeset_acquire_init(&ctx, 0);
15796
15797 retry:
15798 state->acquire_ctx = &ctx;
15799
15800 drm_for_each_crtc(crtc, dev) {
15801 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15802 if (IS_ERR(crtc_state)) {
15803 ret = PTR_ERR(crtc_state);
15804 goto out;
15805 }
15806
15807 if (crtc_state->active) {
15808 ret = drm_atomic_add_affected_planes(state, crtc);
15809 if (ret)
15810 goto out;
15811
15812 /*
15813 * FIXME hack to force a LUT update to avoid the
15814 * plane update forcing the pipe gamma on without
15815 * having a proper LUT loaded. Remove once we
15816 * have readout for pipe gamma enable.
15817 */
15818 crtc_state->color_mgmt_changed = true;
15819 }
15820 }
15821
15822 ret = drm_atomic_commit(state);
15823
15824 out:
15825 if (ret == -EDEADLK) {
15826 drm_atomic_state_clear(state);
15827 drm_modeset_backoff(&ctx);
15828 goto retry;
15829 }
15830
15831 drm_atomic_state_put(state);
15832
15833 drm_modeset_drop_locks(&ctx);
15834 drm_modeset_acquire_fini(&ctx);
15835
15836 return ret;
15837 }
15838
15839 int intel_modeset_init(struct drm_device *dev)
15840 {
15841 struct drm_i915_private *dev_priv = to_i915(dev);
15842 struct i915_ggtt *ggtt = &dev_priv->ggtt;
15843 enum pipe pipe;
15844 struct intel_crtc *crtc;
15845 int ret;
15846
15847 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15848
15849 drm_mode_config_init(dev);
15850
15851 ret = intel_bw_init(dev_priv);
15852 if (ret)
15853 return ret;
15854
15855 dev->mode_config.min_width = 0;
15856 dev->mode_config.min_height = 0;
15857
15858 dev->mode_config.preferred_depth = 24;
15859 dev->mode_config.prefer_shadow = 1;
15860
15861 dev->mode_config.allow_fb_modifiers = true;
15862
15863 dev->mode_config.funcs = &intel_mode_funcs;
15864
15865 init_llist_head(&dev_priv->atomic_helper.free_list);
15866 INIT_WORK(&dev_priv->atomic_helper.free_work,
15867 intel_atomic_helper_free_state_worker);
15868
15869 intel_init_quirks(dev_priv);
15870
15871 intel_fbc_init(dev_priv);
15872
15873 intel_init_pm(dev_priv);
15874
15875 /*
15876 * There may be no VBT; and if the BIOS enabled SSC we can
15877 * just keep using it to avoid unnecessary flicker. Whereas if the
15878 * BIOS isn't using it, don't assume it will work even if the VBT
15879 * indicates as much.
15880 */
15881 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15882 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15883 DREF_SSC1_ENABLE);
15884
15885 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15886 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15887 bios_lvds_use_ssc ? "en" : "dis",
15888 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15889 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15890 }
15891 }
15892
15893 /*
15894 * Maximum framebuffer dimensions, chosen to match
15895 * the maximum render engine surface size on gen4+.
15896 */
15897 if (INTEL_GEN(dev_priv) >= 7) {
15898 dev->mode_config.max_width = 16384;
15899 dev->mode_config.max_height = 16384;
15900 } else if (INTEL_GEN(dev_priv) >= 4) {
15901 dev->mode_config.max_width = 8192;
15902 dev->mode_config.max_height = 8192;
15903 } else if (IS_GEN(dev_priv, 3)) {
15904 dev->mode_config.max_width = 4096;
15905 dev->mode_config.max_height = 4096;
15906 } else {
15907 dev->mode_config.max_width = 2048;
15908 dev->mode_config.max_height = 2048;
15909 }
15910
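/*
 * Cursor size limits: 845G/865G have a variable-height cursor plane
 * (64 or 512 pixels wide respectively, up to 1023 lines), gen2 is
 * otherwise fixed at 64x64, and everything newer handles up to
 * 256x256.
 */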
15911 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15912 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15913 dev->mode_config.cursor_height = 1023;
15914 } else if (IS_GEN(dev_priv, 2)) {
15915 dev->mode_config.cursor_width = 64;
15916 dev->mode_config.cursor_height = 64;
15917 } else {
15918 dev->mode_config.cursor_width = 256;
15919 dev->mode_config.cursor_height = 256;
15920 }
15921
15922 dev->mode_config.fb_base = ggtt->gmadr.start;
15923
15924 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15925 INTEL_INFO(dev_priv)->num_pipes,
15926 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15927
15928 for_each_pipe(dev_priv, pipe) {
15929 ret = intel_crtc_init(dev_priv, pipe);
15930 if (ret) {
15931 drm_mode_config_cleanup(dev);
15932 return ret;
15933 }
15934 }
15935
15936 intel_shared_dpll_init(dev);
15937 intel_update_fdi_pll_freq(dev_priv);
15938
15939 intel_update_czclk(dev_priv);
15940 intel_modeset_init_hw(dev);
15941
15942 intel_hdcp_component_init(dev_priv);
15943
15944 if (dev_priv->max_cdclk_freq == 0)
15945 intel_update_max_cdclk(dev_priv);
15946
15947 /* Just disable it once at startup */
15948 i915_disable_vga(dev_priv);
15949 intel_setup_outputs(dev_priv);
15950
15951 drm_modeset_lock_all(dev);
15952 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15953 drm_modeset_unlock_all(dev);
15954
15955 for_each_intel_crtc(dev, crtc) {
15956 struct intel_initial_plane_config plane_config = {};
15957
15958 if (!crtc->active)
15959 continue;
15960
15961 /*
15962 * Note that reserving the BIOS fb up front prevents us
15963 * from stuffing other stolen allocations like the ring
15964 * on top. This prevents some ugliness at boot time, and
15965 * can even allow for smooth boot transitions if the BIOS
15966 * fb is large enough for the active pipe configuration.
15967 */
15968 dev_priv->display.get_initial_plane_config(crtc,
15969 &plane_config);
15970
15971 /*
15972 * If the fb is shared between multiple heads, we'll
15973 * just get the first one.
15974 */
15975 intel_find_initial_plane_obj(crtc, &plane_config);
15976 }
15977
15978 /*
15979 * Make sure hardware watermarks really match the state we read out.
15980 * Note that we need to do this after reconstructing the BIOS fb's
15981 * since the watermark calculation done here will use pstate->fb.
15982 */
15983 if (!HAS_GMCH(dev_priv))
15984 sanitize_watermarks(dev);
15985
15986 /*
15987 * Force all active planes to recompute their states. So that on
15988 * mode_setcrtc after probe, all the intel_plane_state variables
15989 * are already calculated and there is no assert_plane warnings
15990 * during bootup.
15991 */
15992 ret = intel_initial_commit(dev);
15993 if (ret)
15994 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15995
15996 return 0;
15997 }
15998
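/*
 * Light up a pipe with a bare 640x480@60 mode for the i830 force
 * quirk. Sanity check of the dividers below, per the i9xx DPLL
 * formulas: m = 5 * (18 + 2) + (7 + 2) = 109, vco = 48000 * 109 /
 * (2 + 2) = 1308000 kHz, dot = 1308000 / (13 * 4) = 25154 kHz, close
 * to the nominal 25175 kHz pixel clock; the WARN_ON below cross-checks
 * exactly that via i9xx_calc_dpll_params().
 */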
15999 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16000 {
16001 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16002 /* 640x480@60Hz, ~25175 kHz */
16003 struct dpll clock = {
16004 .m1 = 18,
16005 .m2 = 7,
16006 .p1 = 13,
16007 .p2 = 4,
16008 .n = 2,
16009 };
16010 u32 dpll, fp;
16011 int i;
16012
16013 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
16014
16015 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
16016 pipe_name(pipe), clock.vco, clock.dot);
16017
16018 fp = i9xx_dpll_compute_fp(&clock);
16019 dpll = DPLL_DVO_2X_MODE |
16020 DPLL_VGA_MODE_DIS |
16021 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16022 PLL_P2_DIVIDE_BY_4 |
16023 PLL_REF_INPUT_DREFCLK |
16024 DPLL_VCO_ENABLE;
16025
16026 I915_WRITE(FP0(pipe), fp);
16027 I915_WRITE(FP1(pipe), fp);
16028
16029 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16030 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16031 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16032 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16033 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16034 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16035 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
16036
16037 /*
16038 * Apparently we need to have VGA mode enabled prior to changing
16039 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16040 * dividers, even though the register value does change.
16041 */
16042 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16043 I915_WRITE(DPLL(pipe), dpll);
16044
16045 /* Wait for the clocks to stabilize. */
16046 POSTING_READ(DPLL(pipe));
16047 udelay(150);
16048
16049 /* The pixel multiplier can only be updated once the
16050 * DPLL is enabled and the clocks are stable.
16051 *
16052 * So write it again.
16053 */
16054 I915_WRITE(DPLL(pipe), dpll);
16055
16056 /* We do this three times for luck */
16057 for (i = 0; i < 3; i++) {
16058 I915_WRITE(DPLL(pipe), dpll);
16059 POSTING_READ(DPLL(pipe));
16060 udelay(150); /* wait for warmup */
16061 }
16062
16063 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16064 POSTING_READ(PIPECONF(pipe));
16065
16066 intel_wait_for_pipe_scanline_moving(crtc);
16067 }
16068
16069 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16070 {
16071 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16072
16073 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16074 pipe_name(pipe));
16075
16076 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
16077 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
16078 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
16079 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
16080 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
16081
16082 I915_WRITE(PIPECONF(pipe), 0);
16083 POSTING_READ(PIPECONF(pipe));
16084
16085 intel_wait_for_pipe_scanline_stopped(crtc);
16086
16087 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
16088 POSTING_READ(DPLL(pipe));
16089 }
16090
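/*
 * On gen2/3 the BIOS can assign a primary plane to either pipe.
 * Disable any plane found attached to a pipe other than the one its
 * CRTC owns, so the plane/pipe mapping matches our fixed software
 * expectations; gen4+ has a fixed mapping and needs no fixup.
 */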
16091 static void
16092 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16093 {
16094 struct intel_crtc *crtc;
16095
16096 if (INTEL_GEN(dev_priv) >= 4)
16097 return;
16098
16099 for_each_intel_crtc(&dev_priv->drm, crtc) {
16100 struct intel_plane *plane =
16101 to_intel_plane(crtc->base.primary);
16102 struct intel_crtc *plane_crtc;
16103 enum pipe pipe;
16104
16105 if (!plane->get_hw_state(plane, &pipe))
16106 continue;
16107
16108 if (pipe == crtc->pipe)
16109 continue;
16110
16111 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16112 plane->base.base.id, plane->base.name);
16113
16114 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16115 intel_plane_disable_noatomic(plane_crtc, plane);
16116 }
16117 }
16118
16119 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16120 {
16121 struct drm_device *dev = crtc->base.dev;
16122 struct intel_encoder *encoder;
16123
16124 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16125 return true;
16126
16127 return false;
16128 }
16129
16130 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16131 {
16132 struct drm_device *dev = encoder->base.dev;
16133 struct intel_connector *connector;
16134
16135 for_each_connector_on_encoder(dev, &encoder->base, connector)
16136 return connector;
16137
16138 return NULL;
16139 }
16140
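/*
 * IBX and CPT have a PCH transcoder per pipe; LPT-H only has PCH
 * transcoder A.
 */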
16141 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
16142 enum pipe pch_transcoder)
16143 {
16144 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16145 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16146 }
16147
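/*
 * Sanitize a single CRTC after hardware readout: clear BIOS frame
 * start delays, disable everything but the primary plane, kill any
 * BIOS-set background color, and shut the pipe down entirely if it is
 * active without any encoders. Also set up the FIFO underrun
 * bookkeeping to match the "reporting disabled" boot state.
 */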
16148 static void intel_sanitize_crtc(struct intel_crtc *crtc,
16149 struct drm_modeset_acquire_ctx *ctx)
16150 {
16151 struct drm_device *dev = crtc->base.dev;
16152 struct drm_i915_private *dev_priv = to_i915(dev);
16153 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
16154 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
16155
16156 /* Clear any frame start delays used for debugging left by the BIOS */
16157 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
16158 i915_reg_t reg = PIPECONF(cpu_transcoder);
16159
16160 I915_WRITE(reg,
16161 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16162 }
16163
16164 if (crtc_state->base.active) {
16165 struct intel_plane *plane;
16166
16167 /* Disable everything but the primary plane */
16168 for_each_intel_plane_on_crtc(dev, crtc, plane) {
16169 const struct intel_plane_state *plane_state =
16170 to_intel_plane_state(plane->base.state);
16171
16172 if (plane_state->base.visible &&
16173 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16174 intel_plane_disable_noatomic(crtc, plane);
16175 }
16176
16177 /*
16178 * Disable any background color set by the BIOS, but enable the
16179 * gamma and CSC to match how we program our planes.
16180 */
16181 if (INTEL_GEN(dev_priv) >= 9)
16182 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16183 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16184 SKL_BOTTOM_COLOR_CSC_ENABLE);
16185 }
16186
16187 /* Adjust the state of the output pipe according to whether we
16188 * have active connectors/encoders. */
16189 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
16190 intel_crtc_disable_noatomic(&crtc->base, ctx);
16191
16192 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
16193 /*
16194 * We start out with underrun reporting disabled to avoid races.
16195 * For correct bookkeeping mark this on active crtcs.
16196 *
16197 * Also on gmch platforms we don't have any hardware bits to
16198 * disable the underrun reporting. Which means we need to start
16199 * out with underrun reporting disabled also on inactive pipes,
16200 * since otherwise we'll complain about the garbage we read when
16201 * e.g. coming up after runtime pm.
16202 *
16203 * No protection against concurrent access is required - at
16204 * worst a fifo underrun happens which also sets this to false.
16205 */
16206 crtc->cpu_fifo_underrun_disabled = true;
16207 /*
16208 * We track the PCH transcoder underrun reporting state
16209 * within the crtc. With crtc for pipe A housing the underrun
16210 * reporting state for PCH transcoder A, crtc for pipe B housing
16211 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16212 * and marking underrun reporting as disabled for the non-existing
16213 * PCH transcoders B and C would prevent enabling the south
16214 * error interrupt (see cpt_can_enable_serr_int()).
16215 */
16216 if (has_pch_transcoder(dev_priv, crtc->pipe))
16217 crtc->pch_fifo_underrun_disabled = true;
16218 }
16219 }
16220
16221 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16222 {
16223 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16224
16225 /*
16226 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
16227 * the hardware when a high res display is plugged in. The DPLL P
16228 * divider is zero, and the pipe timings are bonkers. We'll
16229 * try to disable everything in that case.
16230 *
16231 * FIXME would be nice to be able to sanitize this state
16232 * without several WARNs, but for now let's take the easy
16233 * road.
16234 */
16235 return IS_GEN(dev_priv, 6) &&
16236 crtc_state->base.active &&
16237 crtc_state->shared_dpll &&
16238 crtc_state->port_clock == 0;
16239 }
16240
16241 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16242 {
16243 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
16244 struct intel_connector *connector;
16245 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16246 struct intel_crtc_state *crtc_state = crtc ?
16247 to_intel_crtc_state(crtc->base.state) : NULL;
16248
16249 /* We need to check both for a crtc link (meaning that the
16250 * encoder is active and trying to read from a pipe) and the
16251 * pipe itself being active. */
16252 bool has_active_crtc = crtc_state &&
16253 crtc_state->base.active;
16254
16255 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16256 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16257 pipe_name(crtc->pipe));
16258 has_active_crtc = false;
16259 }
16260
16261 connector = intel_encoder_find_connector(encoder);
16262 if (connector && !has_active_crtc) {
16263 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16264 encoder->base.base.id,
16265 encoder->base.name);
16266
16267 /* Connector is active, but has no active pipe. This is
16268 * fallout from our resume register restoring. Disable
16269 * the encoder manually again. */
16270 if (crtc_state) {
16271 struct drm_encoder *best_encoder;
16272
16273 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16274 encoder->base.base.id,
16275 encoder->base.name);
16276
16277 /* avoid oopsing in case the hooks consult best_encoder */
16278 best_encoder = connector->base.state->best_encoder;
16279 connector->base.state->best_encoder = &encoder->base;
16280
16281 if (encoder->disable)
16282 encoder->disable(encoder, crtc_state,
16283 connector->base.state);
16284 if (encoder->post_disable)
16285 encoder->post_disable(encoder, crtc_state,
16286 connector->base.state);
16287
16288 connector->base.state->best_encoder = best_encoder;
16289 }
16290 encoder->base.crtc = NULL;
16291
16292 /* Inconsistent output/port/pipe state happens presumably due to
16293 * a bug in one of the get_hw_state functions. Or someplace else
16294 * in our code, like the register restore mess on resume. Clamp
16295 * things to off as a safer default. */
16296
16297 connector->base.dpms = DRM_MODE_DPMS_OFF;
16298 connector->base.encoder = NULL;
16299 }
16300
16301 /* notify opregion of the sanitized encoder state */
16302 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
16303
16304 if (INTEL_GEN(dev_priv) >= 11)
16305 icl_sanitize_encoder_pll_mapping(encoder);
16306 }
16307
16308 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16309 {
16310 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16311
16312 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16313 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16314 i915_disable_vga(dev_priv);
16315 }
16316 }
16317
16318 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16319 {
16320 intel_wakeref_t wakeref;
16321
16322 /*
16323 * This function can be called both from intel_modeset_setup_hw_state and
16324 * at a very early point in our resume sequence, where the power well
16325 * structures are not yet restored. Since this function is at a very
16326 * paranoid "someone might have enabled VGA while we were not looking"
16327 * level, just check if the power well is enabled instead of trying to
16328 * follow the "don't touch the power well if we don't need it" policy
16329 * the rest of the driver uses.
16330 */
16331 wakeref = intel_display_power_get_if_enabled(dev_priv,
16332 POWER_DOMAIN_VGA);
16333 if (!wakeref)
16334 return;
16335
16336 i915_redisable_vga_power_on(dev_priv);
16337
16338 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16339 }
16340
16341 /* FIXME read out full plane state for all planes */
16342 static void readout_plane_state(struct drm_i915_private *dev_priv)
16343 {
16344 struct intel_plane *plane;
16345 struct intel_crtc *crtc;
16346
16347 for_each_intel_plane(&dev_priv->drm, plane) {
16348 struct intel_plane_state *plane_state =
16349 to_intel_plane_state(plane->base.state);
16350 struct intel_crtc_state *crtc_state;
16351 enum pipe pipe = PIPE_A;
16352 bool visible;
16353
16354 visible = plane->get_hw_state(plane, &pipe);
16355
16356 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16357 crtc_state = to_intel_crtc_state(crtc->base.state);
16358
16359 intel_set_plane_visible(crtc_state, plane_state, visible);
16360
16361 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16362 plane->base.base.id, plane->base.name,
16363 enableddisabled(visible), pipe_name(pipe));
16364 }
16365
16366 for_each_intel_crtc(&dev_priv->drm, crtc) {
16367 struct intel_crtc_state *crtc_state =
16368 to_intel_crtc_state(crtc->base.state);
16369
16370 fixup_active_planes(crtc_state);
16371 }
16372 }
16373
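/*
 * Read the current hardware state back into the atomic state structs,
 * in dependency order: CRTCs first, then planes, shared DPLLs,
 * encoders and connectors, and finally the derived state (mode, pixel
 * rate, min cdclk/voltage level, bandwidth).
 */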
16374 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16375 {
16376 struct drm_i915_private *dev_priv = to_i915(dev);
16377 enum pipe pipe;
16378 struct intel_crtc *crtc;
16379 struct intel_encoder *encoder;
16380 struct intel_connector *connector;
16381 struct drm_connector_list_iter conn_iter;
16382 int i;
16383
16384 dev_priv->active_crtcs = 0;
16385
16386 for_each_intel_crtc(dev, crtc) {
16387 struct intel_crtc_state *crtc_state =
16388 to_intel_crtc_state(crtc->base.state);
16389
16390 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16391 memset(crtc_state, 0, sizeof(*crtc_state));
16392 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
16393
16394 crtc_state->base.active = crtc_state->base.enable =
16395 dev_priv->display.get_pipe_config(crtc, crtc_state);
16396
16397 crtc->base.enabled = crtc_state->base.enable;
16398 crtc->active = crtc_state->base.active;
16399
16400 if (crtc_state->base.active)
16401 dev_priv->active_crtcs |= 1 << crtc->pipe;
16402
16403 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16404 crtc->base.base.id, crtc->base.name,
16405 enableddisabled(crtc_state->base.active));
16406 }
16407
16408 readout_plane_state(dev_priv);
16409
16410 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16411 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16412
16413 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16414 &pll->state.hw_state);
16415 pll->state.crtc_mask = 0;
16416 for_each_intel_crtc(dev, crtc) {
16417 struct intel_crtc_state *crtc_state =
16418 to_intel_crtc_state(crtc->base.state);
16419
16420 if (crtc_state->base.active &&
16421 crtc_state->shared_dpll == pll)
16422 pll->state.crtc_mask |= 1 << crtc->pipe;
16423 }
16424 pll->active_mask = pll->state.crtc_mask;
16425
16426 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16427 pll->info->name, pll->state.crtc_mask, pll->on);
16428 }
16429
16430 for_each_intel_encoder(dev, encoder) {
16431 pipe = 0;
16432
16433 if (encoder->get_hw_state(encoder, &pipe)) {
16434 struct intel_crtc_state *crtc_state;
16435
16436 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16437 crtc_state = to_intel_crtc_state(crtc->base.state);
16438
16439 encoder->base.crtc = &crtc->base;
16440 encoder->get_config(encoder, crtc_state);
16441 } else {
16442 encoder->base.crtc = NULL;
16443 }
16444
16445 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16446 encoder->base.base.id, encoder->base.name,
16447 enableddisabled(encoder->base.crtc),
16448 pipe_name(pipe));
16449 }
16450
16451 drm_connector_list_iter_begin(dev, &conn_iter);
16452 for_each_intel_connector_iter(connector, &conn_iter) {
16453 if (connector->get_hw_state(connector)) {
16454 connector->base.dpms = DRM_MODE_DPMS_ON;
16455
16456 encoder = connector->encoder;
16457 connector->base.encoder = &encoder->base;
16458
16459 if (encoder->base.crtc &&
16460 encoder->base.crtc->state->active) {
16461 /*
16462 * This has to be done during hardware readout
16463 * because anything calling .crtc_disable may
16464 * rely on the connector_mask being accurate.
16465 */
16466 encoder->base.crtc->state->connector_mask |=
16467 drm_connector_mask(&connector->base);
16468 encoder->base.crtc->state->encoder_mask |=
16469 drm_encoder_mask(&encoder->base);
16470 }
16471
16472 } else {
16473 connector->base.dpms = DRM_MODE_DPMS_OFF;
16474 connector->base.encoder = NULL;
16475 }
16476 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16477 connector->base.base.id, connector->base.name,
16478 enableddisabled(connector->base.encoder));
16479 }
16480 drm_connector_list_iter_end(&conn_iter);
16481
16482 for_each_intel_crtc(dev, crtc) {
16483 struct intel_bw_state *bw_state =
16484 to_intel_bw_state(dev_priv->bw_obj.state);
16485 struct intel_crtc_state *crtc_state =
16486 to_intel_crtc_state(crtc->base.state);
16487 struct intel_plane *plane;
16488 int min_cdclk = 0;
16489
16490 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16491 if (crtc_state->base.active) {
16492 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16493 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16494 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16495 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16496 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16497
16498 /*
16499 * The initial mode needs to be set in order to keep
16500 * the atomic core happy. It wants a valid mode if the
16501 * crtc's enabled, so we do the above call.
16502 *
16503 * But we don't set all the derived state fully, hence
16504 * set a flag to indicate that a full recalculation is
16505 * needed on the next commit.
16506 */
16507 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16508
16509 intel_crtc_compute_pixel_rate(crtc_state);
16510
16511 if (dev_priv->display.modeset_calc_cdclk) {
16512 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16513 if (WARN_ON(min_cdclk < 0))
16514 min_cdclk = 0;
16515 }
16516
16517 drm_calc_timestamping_constants(&crtc->base,
16518 &crtc_state->base.adjusted_mode);
16519 update_scanline_offset(crtc_state);
16520 }
16521
16522 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16523 dev_priv->min_voltage_level[crtc->pipe] =
16524 crtc_state->min_voltage_level;
16525
16526 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
16527 const struct intel_plane_state *plane_state =
16528 to_intel_plane_state(plane->base.state);
16529
16530 /*
16531 * FIXME don't have the fb yet, so can't
16532 * use intel_plane_data_rate() :(
16533 */
16534 if (plane_state->base.visible)
16535 crtc_state->data_rate[plane->id] =
16536 4 * crtc_state->pixel_rate;
16537 }
16538
16539 intel_bw_crtc_update(bw_state, crtc_state);
16540
16541 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16542 }
16543 }
16544
16545 static void
16546 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16547 {
16548 struct intel_encoder *encoder;
16549
16550 for_each_intel_encoder(&dev_priv->drm, encoder) {
16551 struct intel_crtc_state *crtc_state;
16552
16553 if (!encoder->get_power_domains)
16554 continue;
16555
16556 /*
16557 * MST-primary and inactive encoders don't have a crtc state
16558 * and neither of these requires any power domain references.
16559 */
16560 if (!encoder->base.crtc)
16561 continue;
16562
16563 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16564 encoder->get_power_domains(encoder, crtc_state);
16565 }
16566 }
16567
16568 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16569 {
16570 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16571 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16572 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16573 DARBF_GATING_DIS);
16574
16575 if (IS_HASWELL(dev_priv)) {
16576 /*
16577 * WaRsPkgCStateDisplayPMReq:hsw
16578 * System hang if this isn't done before disabling all planes!
16579 */
16580 I915_WRITE(CHICKEN_PAR1_1,
16581 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16582 }
16583 }
16584
16585 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16586 enum port port, i915_reg_t hdmi_reg)
16587 {
16588 u32 val = I915_READ(hdmi_reg);
16589
16590 if (val & SDVO_ENABLE ||
16591 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16592 return;
16593
16594 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16595 port_name(port));
16596
16597 val &= ~SDVO_PIPE_SEL_MASK;
16598 val |= SDVO_PIPE_SEL(PIPE_A);
16599
16600 I915_WRITE(hdmi_reg, val);
16601 }
16602
16603 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16604 enum port port, i915_reg_t dp_reg)
16605 {
16606 u32 val = I915_READ(dp_reg);
16607
16608 if (val & DP_PORT_EN ||
16609 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16610 return;
16611
16612 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16613 port_name(port));
16614
16615 val &= ~DP_PIPE_SEL_MASK;
16616 val |= DP_PIPE_SEL(PIPE_A);
16617
16618 I915_WRITE(dp_reg, val);
16619 }
16620
16621 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16622 {
16623 /*
16624 * The BIOS may select transcoder B on some of the PCH
16625 * ports even if it doesn't enable the port. This would trip
16626 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16627 * Sanitize the transcoder select bits to prevent that. We
16628 * assume that the BIOS never actually enabled the port,
16629 * because if it did we'd actually have to toggle the port
16630 * on and back off to make the transcoder A select stick
16631 * (see intel_dp_link_down(), intel_disable_hdmi(),
16632 * intel_disable_sdvo()).
16633 */
16634 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16635 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16636 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16637
16638 /* PCH SDVOB multiplex with HDMIB */
16639 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16640 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16641 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16642 }
16643
16644 /* Scan out the current hw modeset state,
16645 * and sanitize it to match the current driver state
16646 */
16647 static void
16648 intel_modeset_setup_hw_state(struct drm_device *dev,
16649 struct drm_modeset_acquire_ctx *ctx)
16650 {
16651 struct drm_i915_private *dev_priv = to_i915(dev);
16652 struct intel_crtc_state *crtc_state;
16653 struct intel_encoder *encoder;
16654 struct intel_crtc *crtc;
16655 intel_wakeref_t wakeref;
16656 int i;
16657
16658 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16659
16660 intel_early_display_was(dev_priv);
16661 intel_modeset_readout_hw_state(dev);
16662
16663 /* HW state is read out, now we need to sanitize this mess. */
16664 get_encoder_power_domains(dev_priv);
16665
16666 if (HAS_PCH_IBX(dev_priv))
16667 ibx_sanitize_pch_ports(dev_priv);
16668
16669 /*
16670 * intel_sanitize_plane_mapping() may need to do vblank
16671 * waits, so we need vblank interrupts restored beforehand.
16672 */
16673 for_each_intel_crtc(&dev_priv->drm, crtc) {
16674 crtc_state = to_intel_crtc_state(crtc->base.state);
16675
16676 drm_crtc_vblank_reset(&crtc->base);
16677
16678 if (crtc_state->base.active)
16679 intel_crtc_vblank_on(crtc_state);
16680 }
16681
16682 intel_sanitize_plane_mapping(dev_priv);
16683
16684 for_each_intel_encoder(dev, encoder)
16685 intel_sanitize_encoder(encoder);
16686
16687 for_each_intel_crtc(&dev_priv->drm, crtc) {
16688 crtc_state = to_intel_crtc_state(crtc->base.state);
16689 intel_sanitize_crtc(crtc, ctx);
16690 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
16691 }
16692
16693 intel_modeset_update_connector_atomic_state(dev);
16694
16695 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16696 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16697
16698 if (!pll->on || pll->active_mask)
16699 continue;
16700
16701 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16702 pll->info->name);
16703
16704 pll->info->funcs->disable(dev_priv, pll);
16705 pll->on = false;
16706 }
16707
16708 if (IS_G4X(dev_priv)) {
16709 g4x_wm_get_hw_state(dev_priv);
16710 g4x_wm_sanitize(dev_priv);
16711 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16712 vlv_wm_get_hw_state(dev_priv);
16713 vlv_wm_sanitize(dev_priv);
16714 } else if (INTEL_GEN(dev_priv) >= 9) {
16715 skl_wm_get_hw_state(dev_priv);
16716 } else if (HAS_PCH_SPLIT(dev_priv)) {
16717 ilk_wm_get_hw_state(dev_priv);
16718 }
16719
16720 for_each_intel_crtc(dev, crtc) {
16721 u64 put_domains;
16722
16723 crtc_state = to_intel_crtc_state(crtc->base.state);
16724 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16725 if (WARN_ON(put_domains))
16726 modeset_put_power_domains(dev_priv, put_domains);
16727 }
16728
16729 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
16730
16731 intel_fbc_init_pipe_state(dev_priv);
16732 }
16733
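/*
 * Restore the atomic state saved off by the suspend path (if any),
 * retrying the full modeset lock acquisition on -EDEADLK.
 */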
16734 void intel_display_resume(struct drm_device *dev)
16735 {
16736 struct drm_i915_private *dev_priv = to_i915(dev);
16737 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16738 struct drm_modeset_acquire_ctx ctx;
16739 int ret;
16740
16741 dev_priv->modeset_restore_state = NULL;
16742 if (state)
16743 state->acquire_ctx = &ctx;
16744
16745 drm_modeset_acquire_init(&ctx, 0);
16746
16747 while (1) {
16748 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16749 if (ret != -EDEADLK)
16750 break;
16751
16752 drm_modeset_backoff(&ctx);
16753 }
16754
16755 if (!ret)
16756 ret = __intel_display_resume(dev, state, &ctx);
16757
16758 intel_enable_ipc(dev_priv);
16759 drm_modeset_drop_locks(&ctx);
16760 drm_modeset_acquire_fini(&ctx);
16761
16762 if (ret)
16763 DRM_ERROR("Restoring old state failed with %i\n", ret);
16764 if (state)
16765 drm_atomic_state_put(state);
16766 }
16767
16768 static void intel_hpd_poll_fini(struct drm_device *dev)
16769 {
16770 struct intel_connector *connector;
16771 struct drm_connector_list_iter conn_iter;
16772
16773 /* Kill all the work that may have been queued by hpd. */
16774 drm_connector_list_iter_begin(dev, &conn_iter);
16775 for_each_intel_connector_iter(connector, &conn_iter) {
16776 if (connector->modeset_retry_work.func)
16777 cancel_work_sync(&connector->modeset_retry_work);
16778 if (connector->hdcp.shim) {
16779 cancel_delayed_work_sync(&connector->hdcp.check_work);
16780 cancel_work_sync(&connector->hdcp.prop_work);
16781 }
16782 }
16783 drm_connector_list_iter_end(&conn_iter);
16784 }
16785
16786 void intel_modeset_cleanup(struct drm_device *dev)
16787 {
16788 struct drm_i915_private *dev_priv = to_i915(dev);
16789
16790 flush_workqueue(dev_priv->modeset_wq);
16791
16792 flush_work(&dev_priv->atomic_helper.free_work);
16793 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16794
16795 /*
16796 * Disable interrupts and polling first to avoid creating havoc.
16797 * Too much stuff here (turning off connectors, ...) would
16798 * experience fancy races otherwise.
16799 */
16800 intel_irq_uninstall(dev_priv);
16801
16802 /*
16803 * Due to the hpd irq storm handling the hotplug work can re-arm the
16804 * poll handlers. Hence disable polling after hpd handling is shut down.
16805 */
16806 intel_hpd_poll_fini(dev);
16807
16808 /* poll work can call into fbdev, hence clean that up afterwards */
16809 intel_fbdev_fini(dev_priv);
16810
16811 intel_unregister_dsm_handler();
16812
16813 intel_fbc_global_disable(dev_priv);
16814
16815 /* flush any delayed tasks or pending work */
16816 flush_scheduled_work();
16817
16818 intel_hdcp_component_fini(dev_priv);
16819
16820 drm_mode_config_cleanup(dev);
16821
16822 intel_overlay_cleanup(dev_priv);
16823
16824 intel_gmbus_teardown(dev_priv);
16825
16826 destroy_workqueue(dev_priv->modeset_wq);
16827
16828 intel_fbc_cleanup_cfb(dev_priv);
16829 }
16830
16831 /*
16832 * set vga decode state - true == enable VGA decode
16833 */
16834 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16835 {
16836 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16837 u16 gmch_ctrl;
16838
16839 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16840 DRM_ERROR("failed to read control word\n");
16841 return -EIO;
16842 }
16843
16844 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16845 return 0;
16846
16847 if (state)
16848 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16849 else
16850 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16851
16852 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16853 DRM_ERROR("failed to write control word\n");
16854 return -EIO;
16855 }
16856
16857 return 0;
16858 }
16859
16860 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16861
16862 struct intel_display_error_state {
16863
16864 u32 power_well_driver;
16865
16866 struct intel_cursor_error_state {
16867 u32 control;
16868 u32 position;
16869 u32 base;
16870 u32 size;
16871 } cursor[I915_MAX_PIPES];
16872
16873 struct intel_pipe_error_state {
16874 bool power_domain_on;
16875 u32 source;
16876 u32 stat;
16877 } pipe[I915_MAX_PIPES];
16878
16879 struct intel_plane_error_state {
16880 u32 control;
16881 u32 stride;
16882 u32 size;
16883 u32 pos;
16884 u32 addr;
16885 u32 surface;
16886 u32 tile_offset;
16887 } plane[I915_MAX_PIPES];
16888
16889 struct intel_transcoder_error_state {
16890 bool available;
16891 bool power_domain_on;
16892 enum transcoder cpu_transcoder;
16893
16894 u32 conf;
16895
16896 u32 htotal;
16897 u32 hblank;
16898 u32 hsync;
16899 u32 vtotal;
16900 u32 vblank;
16901 u32 vsync;
16902 } transcoder[4];
16903 };
16904
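/*
 * Snapshot the display-facing registers for the GPU error state. Uses
 * GFP_ATOMIC since this may run from the error capture path where
 * sleeping is not allowed.
 */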
16905 struct intel_display_error_state *
16906 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16907 {
16908 struct intel_display_error_state *error;
16909 int transcoders[] = {
16910 TRANSCODER_A,
16911 TRANSCODER_B,
16912 TRANSCODER_C,
16913 TRANSCODER_EDP,
16914 };
16915 int i;
16916
16917 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16918
16919 if (!HAS_DISPLAY(dev_priv))
16920 return NULL;
16921
16922 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16923 if (error == NULL)
16924 return NULL;
16925
16926 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16927 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16928
16929 for_each_pipe(dev_priv, i) {
16930 error->pipe[i].power_domain_on =
16931 __intel_display_power_is_enabled(dev_priv,
16932 POWER_DOMAIN_PIPE(i));
16933 if (!error->pipe[i].power_domain_on)
16934 continue;
16935
16936 error->cursor[i].control = I915_READ(CURCNTR(i));
16937 error->cursor[i].position = I915_READ(CURPOS(i));
16938 error->cursor[i].base = I915_READ(CURBASE(i));
16939
16940 error->plane[i].control = I915_READ(DSPCNTR(i));
16941 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16942 if (INTEL_GEN(dev_priv) <= 3) {
16943 error->plane[i].size = I915_READ(DSPSIZE(i));
16944 error->plane[i].pos = I915_READ(DSPPOS(i));
16945 }
16946 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16947 error->plane[i].addr = I915_READ(DSPADDR(i));
16948 if (INTEL_GEN(dev_priv) >= 4) {
16949 error->plane[i].surface = I915_READ(DSPSURF(i));
16950 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16951 }
16952
16953 error->pipe[i].source = I915_READ(PIPESRC(i));
16954
16955 if (HAS_GMCH(dev_priv))
16956 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16957 }
16958
16959 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16960 enum transcoder cpu_transcoder = transcoders[i];
16961
16962 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16963 continue;
16964
16965 error->transcoder[i].available = true;
16966 error->transcoder[i].power_domain_on =
16967 __intel_display_power_is_enabled(dev_priv,
16968 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16969 if (!error->transcoder[i].power_domain_on)
16970 continue;
16971
16972 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16973
16974 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16975 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16976 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16977 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16978 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16979 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16980 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16981 }
16982
16983 return error;
16984 }
16985
16986 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16987
16988 void
16989 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16990 struct intel_display_error_state *error)
16991 {
16992 struct drm_i915_private *dev_priv = m->i915;
16993 int i;
16994
16995 if (!error)
16996 return;
16997
16998 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16999 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17000 err_printf(m, "PWR_WELL_CTL2: %08x\n",
17001 error->power_well_driver);
17002 for_each_pipe(dev_priv, i) {
17003 err_printf(m, "Pipe [%d]:\n", i);
17004 err_printf(m, " Power: %s\n",
17005 onoff(error->pipe[i].power_domain_on));
17006 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
17007 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
17008
17009 err_printf(m, "Plane [%d]:\n", i);
17010 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
17011 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
17012 if (INTEL_GEN(dev_priv) <= 3) {
17013 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
17014 err_printf(m, " POS: %08x\n", error->plane[i].pos);
17015 }
17016 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17017 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
17018 if (INTEL_GEN(dev_priv) >= 4) {
17019 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
17020 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
17021 }
17022
17023 err_printf(m, "Cursor [%d]:\n", i);
17024 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
17025 err_printf(m, " POS: %08x\n", error->cursor[i].position);
17026 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
17027 }
17028
17029 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17030 if (!error->transcoder[i].available)
17031 continue;
17032
17033 err_printf(m, "CPU transcoder: %s\n",
17034 transcoder_name(error->transcoder[i].cpu_transcoder));
17035 err_printf(m, " Power: %s\n",
17036 onoff(error->transcoder[i].power_domain_on));
17037 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
17038 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
17039 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
17040 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
17041 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
17042 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
17043 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
17044 }
17045 }
17046
17047 #endif