]> git.ipfire.org Git - thirdparty/linux.git/blob - drivers/gpu/drm/i915/display/intel_display.c
Merge tag 'drm-misc-next-2020-06-19' of git://anongit.freedesktop.org/drm/drm-misc...
[thirdparty/linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44
45 #include "display/intel_crt.h"
46 #include "display/intel_ddi.h"
47 #include "display/intel_dp.h"
48 #include "display/intel_dp_mst.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv (adds alpha-carrying variants) */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/*
 * Framebuffer modifiers for the i9xx primary planes; the list is
 * terminated by DRM_FORMAT_MOD_INVALID per the drm_fourcc convention.
 */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear (untiled) buffers */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
145
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147 struct intel_crtc_state *pipe_config);
148 static void ilk_pch_clock_get(struct intel_crtc *crtc,
149 struct intel_crtc_state *pipe_config);
150
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152 struct drm_i915_gem_object *obj,
153 struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157 const struct intel_link_m_n *m_n,
158 const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164 const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166 const struct intel_crtc_state *pipe_config);
167 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
168 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void intel_modeset_setup_hw_state(struct drm_device *dev,
170 struct drm_modeset_acquire_ctx *ctx);
171 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
172
/*
 * Per-platform/output DPLL divider limits. Clock rates (dot, vco,
 * dot_limit) are in kHz (data rates in 5x fast-clock units for vlv/chv,
 * see the table definitions); the other fields bound the individual
 * divider values of the clock equation.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 selection: p2_fast above dot_limit (or for dual-link LVDS),
	 * p2_slow otherwise — see i9xx_select_p2_div().
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
183
184 /* returns HPLL frequency in kHz */
185 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
186 {
187 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
188
189 /* Obtain SKU information */
190 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
191 CCK_FUSE_HPLL_FREQ_MASK;
192
193 return vco_freq[hpll_freq] * 1000;
194 }
195
196 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
197 const char *name, u32 reg, int ref_freq)
198 {
199 u32 val;
200 int divider;
201
202 val = vlv_cck_read(dev_priv, reg);
203 divider = val & CCK_FREQUENCY_VALUES;
204
205 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
206 (divider << CCK_FREQUENCY_STATUS_SHIFT),
207 "%s change in progress\n", name);
208
209 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
210 }
211
212 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
213 const char *name, u32 reg)
214 {
215 int hpll;
216
217 vlv_cck_get(dev_priv);
218
219 if (dev_priv->hpll_freq == 0)
220 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
221
222 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
223
224 vlv_cck_put(dev_priv);
225
226 return hpll;
227 }
228
229 static void intel_update_czclk(struct drm_i915_private *dev_priv)
230 {
231 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
232 return;
233
234 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
235 CCK_CZ_CLOCK_CONTROL);
236
237 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
238 dev_priv->czclk_freq);
239 }
240
241 /* units of 100MHz */
242 static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
243 const struct intel_crtc_state *pipe_config)
244 {
245 if (HAS_DDI(dev_priv))
246 return pipe_config->port_clock; /* SPLL */
247 else
248 return dev_priv->fdi_pll_freq;
249 }
250
/* gen2 PLL limits, DAC output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 PLL limits, DVO output (only the p2 dividers differ from DAC) */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 PLL limits, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx PLL limits, SDVO output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx PLL limits, LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
315
316
/* g4x PLL limits, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x PLL limits, HDMI output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x PLL limits, single-channel LVDS (dot_limit 0 forces p2_fast) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x PLL limits, dual-channel LVDS */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
372
/* Pineview PLL limits, SDVO output */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview PLL limits, LVDS output */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* m1 unused (combined m divider, see above), treat all of m as m2 */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
400
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ilk single-channel LVDS */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ilk dual-channel LVDS */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ilk dual-channel LVDS with 100MHz refclk */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
471
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on chv, hence the << 22 scaling */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

/* bxt limits, searched via chv_find_best_dpll (see bxt_find_best_dpll) */
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
515
516 /* WA Display #0827: Gen9:all */
517 static void
518 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
519 {
520 if (enable)
521 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
522 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
523 else
524 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
525 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
526 }
527
528 /* Wa_2006604312:icl,ehl */
529 static void
530 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
531 bool enable)
532 {
533 if (enable)
534 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
535 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
536 else
537 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
538 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
539 }
540
/* Does this crtc state require a full modeset of the hardware? */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

/* A transcoder slaved to another (master) transcoder in port sync mode. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

/* A transcoder that has one or more port sync slaves attached. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

/* Whether the transcoder takes part in port sync, as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
565
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divider values. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

/* Effective loopback divider; register fields encode (actual - 2). */
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* n is also encoded as (actual - 2), hence n + 2 below */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* The pipe runs at 1/5 of the fast dot clock. */
	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* chv m2 carries 22 fractional bits, hence the n << 22 scaling */
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* The pipe runs at 1/5 of the fast dot clock. */
	return clock->dot / 5;
}
628
629 /*
630 * Returns whether the given set of divisors are valid for a given refclk with
631 * the given connectors.
632 */
633 static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
634 const struct intel_limit *limit,
635 const struct dpll *clock)
636 {
637 if (clock->n < limit->n.min || limit->n.max < clock->n)
638 return false;
639 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
640 return false;
641 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
642 return false;
643 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
644 return false;
645
646 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
647 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
648 if (clock->m1 <= clock->m2)
649 return false;
650
651 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
652 !IS_GEN9_LP(dev_priv)) {
653 if (clock->p < limit->p.min || limit->p.max < clock->p)
654 return false;
655 if (clock->m < limit->m.min || limit->m.max < clock->m)
656 return false;
657 }
658
659 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
660 return false;
661 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
662 * connector, etc., rather than just a single range.
663 */
664 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
665 return false;
666
667 return true;
668 }
669
670 static int
671 i9xx_select_p2_div(const struct intel_limit *limit,
672 const struct intel_crtc_state *crtc_state,
673 int target)
674 {
675 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
676
677 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
678 /*
679 * For LVDS just rely on its current settings for dual-channel.
680 * We haven't figured out how to reliably set up different
681 * single/dual channel state, if we even can.
682 */
683 if (intel_is_dual_link_lvds(dev_priv))
684 return limit->p2.p2_fast;
685 else
686 return limit->p2.p2_slow;
687 } else {
688 if (target < limit->p2.dot_limit)
689 return limit->p2.p2_slow;
690 else
691 return limit->p2.p2_fast;
692 }
693 }
694
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustively walk the divider space, keeping the combination
	 * with the smallest absolute dot clock error.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay above m2 (see intel_pll_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only drops below target when at least one candidate was kept */
	return (err != target);
}
752
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive walk as i9xx_find_best_dpll(), but without the
	 * m1 > m2 early break: pnv has a single combined m divider and
	 * keeps m1 fixed at 0 (see pnv_limits_*).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
808
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* clamp further n values to the
						 * best one found so far */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
867
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	/* Error expressed in parts-per-million of the target frequency. */
	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* Require at least a 10 ppm improvement to replace the best clock. */
	return *error_ppm + 10 < best_error_ppm;
}
907
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve m2 directly for the wanted fast clock. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
967
968 /*
969 * Returns a set of divisors for the desired target clock with the given
970 * refclk, or FALSE. The returned values represent the clock equation:
971 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
972 */
973 static bool
974 chv_find_best_dpll(const struct intel_limit *limit,
975 struct intel_crtc_state *crtc_state,
976 int target, int refclk, struct dpll *match_clock,
977 struct dpll *best_clock)
978 {
979 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
980 struct drm_device *dev = crtc->base.dev;
981 unsigned int best_error_ppm;
982 struct dpll clock;
983 u64 m2;
984 int found = false;
985
986 memset(best_clock, 0, sizeof(*best_clock));
987 best_error_ppm = 1000000;
988
989 /*
990 * Based on hardware doc, the n always set to 1, and m1 always
991 * set to 2. If requires to support 200Mhz refclk, we need to
992 * revisit this because n may not 1 anymore.
993 */
994 clock.n = 1, clock.m1 = 2;
995 target *= 5; /* fast clock */
996
997 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
998 for (clock.p2 = limit->p2.p2_fast;
999 clock.p2 >= limit->p2.p2_slow;
1000 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1001 unsigned int error_ppm;
1002
1003 clock.p = clock.p1 * clock.p2;
1004
1005 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1006 refclk * clock.m1);
1007
1008 if (m2 > INT_MAX/clock.m1)
1009 continue;
1010
1011 clock.m2 = m2;
1012
1013 chv_calc_dpll_params(refclk, &clock);
1014
1015 if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
1016 continue;
1017
1018 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1019 best_error_ppm, &error_ppm))
1020 continue;
1021
1022 *best_clock = clock;
1023 best_error_ppm = error_ppm;
1024 found = true;
1025 }
1026 }
1027
1028 return found;
1029 }
1030
1031 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1032 struct dpll *best_clock)
1033 {
1034 int refclk = 100000;
1035 const struct intel_limit *limit = &intel_limits_bxt;
1036
1037 return chv_find_best_dpll(limit, crtc_state,
1038 crtc_state->port_clock, refclk,
1039 NULL, best_clock);
1040 }
1041
1042 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1043 enum pipe pipe)
1044 {
1045 i915_reg_t reg = PIPEDSL(pipe);
1046 u32 line1, line2;
1047 u32 line_mask;
1048
1049 if (IS_GEN(dev_priv, 2))
1050 line_mask = DSL_LINEMASK_GEN2;
1051 else
1052 line_mask = DSL_LINEMASK_GEN3;
1053
1054 line1 = intel_de_read(dev_priv, reg) & line_mask;
1055 msleep(5);
1056 line2 = intel_de_read(dev_priv, reg) & line_mask;
1057
1058 return line1 != line2;
1059 }
1060
/*
 * Wait (up to 100ms) for the pipe's scanline movement to match @state:
 * true = wait for the scanline to start moving, false = wait for it to
 * settle. Logs an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1072
/* Wait for the pipe's scanline counter to stop advancing (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1077
/* Wait for the pipe's scanline counter to start advancing (pipe on). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1082
/*
 * Wait until the pipe of @old_crtc_state has actually turned off.
 * Gen4+ exposes a PIPECONF state bit we can poll; older hardware has no
 * such bit, so fall back to watching the scanline counter stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1102
1103 /* Only for pre-ILK configs */
1104 void assert_pll(struct drm_i915_private *dev_priv,
1105 enum pipe pipe, bool state)
1106 {
1107 u32 val;
1108 bool cur_state;
1109
1110 val = intel_de_read(dev_priv, DPLL(pipe));
1111 cur_state = !!(val & DPLL_VCO_ENABLE);
1112 I915_STATE_WARN(cur_state != state,
1113 "PLL state assertion failure (expected %s, current %s)\n",
1114 onoff(state), onoff(cur_state));
1115 }
1116
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* DSI PLL control lives behind the CCK sideband; bracket the read. */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1132
/*
 * Warn if the FDI TX enable state for @pipe differs from @state.
 * DDI platforms have no dedicated FDI_TX register, so the transcoder's
 * DDI function enable bit stands in for it.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1159
1160 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1161 enum pipe pipe, bool state)
1162 {
1163 u32 val;
1164 bool cur_state;
1165
1166 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1167 cur_state = !!(val & FDI_RX_ENABLE);
1168 I915_STATE_WARN(cur_state != state,
1169 "FDI RX state assertion failure (expected %s, current %s)\n",
1170 onoff(state), onoff(cur_state));
1171 }
1172 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1173 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1174
/*
 * Warn if the FDI TX PLL for @pipe is disabled. Skipped on platforms
 * where the driver doesn't control this PLL directly.
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1191
1192 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1193 enum pipe pipe, bool state)
1194 {
1195 u32 val;
1196 bool cur_state;
1197
1198 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1199 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1200 I915_STATE_WARN(cur_state != state,
1201 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1202 onoff(state), onoff(cur_state));
1203 }
1204
/*
 * Warn if the panel power sequencer registers for the panel driven by
 * @pipe are still write-locked (and the panel is powered on). The PPS
 * instance and the pipe actually driving the panel are looked up per
 * platform from the panel port select bits.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms don't use this legacy PPS lock scheme */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port to the pipe driving it */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Pre-PCH-split hardware only drives the panel via LVDS */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked means: panel off, or the unlock key is written */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}
1262
/*
 * Warn if the enable state of @cpu_transcoder differs from @state.
 * If the transcoder's power domain is off, the pipe is treated as
 * disabled without touching the hardware.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1290
1291 static void assert_plane(struct intel_plane *plane, bool state)
1292 {
1293 enum pipe pipe;
1294 bool cur_state;
1295
1296 cur_state = plane->get_hw_state(plane, &pipe);
1297
1298 I915_STATE_WARN(cur_state != state,
1299 "%s assertion failure (expected %s, current %s)\n",
1300 plane->base.name, onoff(state), onoff(cur_state));
1301 }
1302
1303 #define assert_plane_enabled(p) assert_plane(p, true)
1304 #define assert_plane_disabled(p) assert_plane(p, false)
1305
/* Warn if any plane attached to @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1314
/*
 * Warn if vblank interrupts are still enabled on @crtc.
 * drm_crtc_vblank_get() returns 0 on success, i.e. when vblanks are
 * usable/enabled; in that case drop the reference we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1320
1321 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1322 enum pipe pipe)
1323 {
1324 u32 val;
1325 bool enabled;
1326
1327 val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
1328 enabled = !!(val & TRANS_ENABLE);
1329 I915_STATE_WARN(enabled,
1330 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1331 pipe_name(pipe));
1332 }
1333
/*
 * Warn if the PCH DP port @port is enabled on transcoder @pipe, and
 * (on IBX) if a disabled port was left pointing at transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1351
/*
 * Warn if the PCH HDMI/SDVO port at @hdmi_reg is enabled on transcoder
 * @pipe, and (on IBX) if a disabled port was left on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1369
/*
 * Warn if any PCH port (DP, VGA, LVDS, HDMI/SDVO) is still enabled on
 * transcoder @pipe. Used before disabling the PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1394
/*
 * Program the DPLL register and wait for the VLV PLL to lock.
 * Preconditions (pipe disabled, panel unlocked) are checked by the
 * caller, vlv_enable_pll().
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1408
/*
 * Enable the VLV DPLL for @crtc (if the VCO is to be enabled) and
 * program the DPLL_MD (pixel multiplier) register.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only touch the PLL itself when the VCO is to be enabled */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1427
1428
/*
 * Enable the CHV PLL: first re-enable the 10bit clock via DPIO, then
 * enable the PLL and wait for lock. Preconditions are checked by the
 * caller, chv_enable_pll().
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1458
/*
 * Enable the CHV DPLL for @crtc and program the pixel multiplier.
 * Pipes B/C need the WaPixelRepeatModeFixForC0 chicken-bit dance since
 * their own DPLL_MD register is not functional.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only touch the PLL itself when the VCO is to be enabled */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1499
1500 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1501 {
1502 if (IS_I830(dev_priv))
1503 return false;
1504
1505 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1506 }
1507
/*
 * Enable the gen2-4 DPLL for @crtc, programming the divider value
 * twice (VGA-mode first) and then re-writing it three times to let
 * the PLL warm up.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1553
/*
 * Disable the gen2-4 DPLL for @crtc_state's crtc, leaving only VGA
 * mode disable set. No-op on 830, which keeps its PLLs running.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1570
/*
 * Disable the VLV DPLL for @pipe, keeping the reference clock (and,
 * for pipes B/C, the CRI clock needed by the DSI/eDP PHY) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1586
/*
 * Disable the CHV DPLL for @pipe, then turn off the 10bit clock to the
 * display controller via DPIO. Reference clocks stay enabled.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1612
/*
 * Wait (up to 1s) for the VLV/CHV PHY to report @dport ready, matching
 * @expected_mask in the per-port ready bits. Port C shares DPLL(0)'s
 * status with port B, shifted up by 4 bits.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dport->base.base.base.id, dport->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
1646
/*
 * Enable the ILK/CPT PCH transcoder for @crtc_state's pipe, copying
 * BPC and interlace mode from the CPU PIPECONF, and wait for the
 * transcoder to report enabled.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1713
/*
 * Enable the single LPT PCH transcoder, which is always fed from
 * pipe A's FDI RX, and wait for it to report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1745
/*
 * Disable the ILK/CPT PCH transcoder for @pipe and wait for it to
 * report disabled. FDI and the PCH ports must already be off.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1776
/*
 * Disable the single LPT PCH transcoder, wait for it to report
 * disabled, and clear the timing override workaround bit.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1794
1795 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1796 {
1797 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1798
1799 if (HAS_PCH_LPT(dev_priv))
1800 return PIPE_A;
1801 else
1802 return crtc->pipe;
1803 }
1804
1805 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1806 {
1807 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1808
1809 /*
1810 * On i965gm the hardware frame counter reads
1811 * zero when the TV encoder is enabled :(
1812 */
1813 if (IS_I965GM(dev_priv) &&
1814 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1815 return 0;
1816
1817 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1818 return 0xffffffff; /* full 32 bit counter */
1819 else if (INTEL_GEN(dev_priv) >= 3)
1820 return 0xffffff; /* only 24 bits of frame count */
1821 else
1822 return 0; /* Gen2 doesn't have a hardware frame counter */
1823 }
1824
/*
 * Enable vblank processing for @crtc_state's crtc, after telling the
 * DRM core how far the hardware frame counter can count.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1834
/* Disable vblank processing for @crtc_state's crtc and sanity-check it. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1842
/*
 * Enable the pipe for @new_crtc_state after sanity-checking that all
 * planes are off and the required PLLs/FDI are running. When there is
 * no usable hardware frame counter, also wait for the scanline to
 * start moving so vblank timestamps are sane.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1900
/*
 * Disable the pipe for @old_crtc_state (planes must already be off)
 * and, unless the pipe is kept running (830), wait for it to actually
 * stop.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1940
/* Tile size in bytes: 2 KiB on gen2, 4 KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1945
1946 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1947 {
1948 if (!is_ccs_modifier(fb->modifier))
1949 return false;
1950
1951 return plane >= fb->format->num_planes / 2;
1952 }
1953
1954 static bool is_gen12_ccs_modifier(u64 modifier)
1955 {
1956 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1957 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1958
1959 }
1960
1961 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1962 {
1963 return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1964 }
1965
1966 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1967 {
1968 if (is_ccs_modifier(fb->modifier))
1969 return is_ccs_plane(fb, plane);
1970
1971 return plane == 1;
1972 }
1973
/*
 * Map a main plane index to its CCS plane index. The CCS planes are
 * laid out after the main planes, so the offset is num_planes / 2.
 */
static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
		    (main_plane && main_plane >= fb->format->num_planes / 2));

	return fb->format->num_planes / 2 + main_plane;
}
1981
/*
 * Map a CCS plane index back to its main plane index (inverse of
 * main_to_ccs_plane()).
 */
static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
		    ccs_plane < fb->format->num_planes / 2);

	return ccs_plane - fb->format->num_planes / 2;
}
1989
1990 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1991 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1992 {
1993 if (is_ccs_modifier(fb->modifier))
1994 return main_to_ccs_plane(fb, main_plane);
1995
1996 return 1;
1997 }
1998
1999 bool
2000 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
2001 uint64_t modifier)
2002 {
2003 return info->is_yuv &&
2004 info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
2005 }
2006
2007 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2008 int color_plane)
2009 {
2010 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2011 color_plane == 1;
2012 }
2013
/*
 * Return the width of one tile row in bytes for @color_plane of @fb.
 *
 * Note the deliberate case ordering: for CCS modifiers the CCS (AUX)
 * plane width is returned first, and control then falls through to the
 * corresponding main-surface tiling case for the non-CCS planes.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		/* Linear "tiles" are a full GTT page wide. */
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* Pre-gen12 CCS planes use 128 byte wide cachelines. */
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* Gen12 CCS planes are made of 64 byte units. */
		if (is_ccs_plane(fb, color_plane))
			return 64;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2066
2067 static unsigned int
2068 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2069 {
2070 if (is_gen12_ccs_plane(fb, color_plane))
2071 return 1;
2072
2073 return intel_tile_size(to_i915(fb->dev)) /
2074 intel_tile_width_bytes(fb, color_plane);
2075 }
2076
2077 /* Return the tile dimensions in pixel units */
2078 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2079 unsigned int *tile_width,
2080 unsigned int *tile_height)
2081 {
2082 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2083 unsigned int cpp = fb->format->cpp[color_plane];
2084
2085 *tile_width = tile_width_bytes / cpp;
2086 *tile_height = intel_tile_height(fb, color_plane);
2087 }
2088
2089 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2090 int color_plane)
2091 {
2092 unsigned int tile_width, tile_height;
2093
2094 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2095
2096 return fb->pitches[color_plane] * tile_height;
2097 }
2098
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2107
2108 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2109 {
2110 unsigned int size = 0;
2111 int i;
2112
2113 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2114 size += rot_info->plane[i].width * rot_info->plane[i].height;
2115
2116 return size;
2117 }
2118
2119 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2120 {
2121 unsigned int size = 0;
2122 int i;
2123
2124 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2125 size += rem_info->plane[i].width * rem_info->plane[i].height;
2126
2127 return size;
2128 }
2129
2130 static void
2131 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2132 const struct drm_framebuffer *fb,
2133 unsigned int rotation)
2134 {
2135 view->type = I915_GGTT_VIEW_NORMAL;
2136 if (drm_rotation_90_or_270(rotation)) {
2137 view->type = I915_GGTT_VIEW_ROTATED;
2138 view->rotated = to_intel_framebuffer(fb)->rot_info;
2139 }
2140 }
2141
/* Platform specific cursor surface base address alignment. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2153
/* Platform specific linear surface base address alignment. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2166
/*
 * Return the required base address alignment in bytes for @color_plane
 * of @fb, based on the platform and tiling modifier.
 *
 * Note the fall-through case ordering: semiplanar UV planes of gen12
 * MC CCS fbs (and, below, gen12 Y-tiled fbs) need tile-row alignment,
 * while the remaining planes use the per-modifier fixed alignment.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2204
2205 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2206 {
2207 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2208 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2209
2210 return INTEL_GEN(dev_priv) < 4 ||
2211 (plane->has_fbc &&
2212 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2213 }
2214
/*
 * Pin an fb's backing object into the GGTT for scanout and optionally
 * install a fence on it.
 *
 * Returns the pinned vma (with an extra reference taken for the caller)
 * or an ERR_PTR on failure. PLANE_HAS_FENCE is OR'ed into *out_flags if
 * a fence was installed; the caller must later hand the same flags to
 * intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment of the main surface; 0 means "no requirement". */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		/* Fence failure is fatal only where a fence is mandatory. */
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference handed to the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2309
/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence (if @flags says one
 * was installed), unpin from the display plane and release the caller's
 * vma reference. The object lock is held across the fence/unpin steps.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	/* Drops the reference taken by intel_pin_and_fence_fb_obj(). */
	i915_vma_put(vma);
}
2320
2321 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2322 unsigned int rotation)
2323 {
2324 if (drm_rotation_90_or_270(rotation))
2325 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2326 else
2327 return fb->pitches[color_plane];
2328 }
2329
2330 /*
2331 * Convert the x/y offsets into a linear offset.
2332 * Only valid with 0/180 degree rotation, which is fine since linear
2333 * offset is only used with linear buffers on pre-hsw and tiled buffers
2334 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2335 */
2336 u32 intel_fb_xy_to_linear(int x, int y,
2337 const struct intel_plane_state *state,
2338 int color_plane)
2339 {
2340 const struct drm_framebuffer *fb = state->hw.fb;
2341 unsigned int cpp = fb->format->cpp[color_plane];
2342 unsigned int pitch = state->color_plane[color_plane].stride;
2343
2344 return y * pitch + x * cpp;
2345 }
2346
2347 /*
2348 * Add the x/y offsets derived from fb->offsets[] to the user
2349 * specified plane src x/y offsets. The resulting x/y offsets
2350 * specify the start of scanout from the beginning of the gtt mapping.
2351 */
2352 void intel_add_fb_offsets(int *x, int *y,
2353 const struct intel_plane_state *state,
2354 int color_plane)
2355
2356 {
2357 *x += state->color_plane[color_plane].x;
2358 *y += state->color_plane[color_plane].y;
2359 }
2360
2361 static u32 intel_adjust_tile_offset(int *x, int *y,
2362 unsigned int tile_width,
2363 unsigned int tile_height,
2364 unsigned int tile_size,
2365 unsigned int pitch_tiles,
2366 u32 old_offset,
2367 u32 new_offset)
2368 {
2369 unsigned int pitch_pixels = pitch_tiles * tile_width;
2370 unsigned int tiles;
2371
2372 WARN_ON(old_offset & (tile_size - 1));
2373 WARN_ON(new_offset & (tile_size - 1));
2374 WARN_ON(new_offset > old_offset);
2375
2376 tiles = (old_offset - new_offset) / tile_size;
2377
2378 *y += tiles / pitch_tiles * tile_height;
2379 *x += tiles % pitch_tiles * tile_width;
2380
2381 /* minimize x in case it got needlessly big */
2382 *y += *x / pitch_pixels * tile_height;
2383 *x %= pitch_pixels;
2384
2385 return new_offset;
2386 }
2387
2388 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2389 {
2390 return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2391 is_gen12_ccs_plane(fb, color_plane);
2392 }
2393
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset,
 * folding the difference into the x/y coordinates. For tiled surfaces
 * the adjustment is done in whole tiles; for linear surfaces the byte
 * difference is converted back to row/pixel coordinates via the pitch.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold x/y into bytes, then split against new base. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2432
2433 /*
2434 * Adjust the tile offset by moving the difference into
2435 * the x/y offsets.
2436 */
2437 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2438 const struct intel_plane_state *state,
2439 int color_plane,
2440 u32 old_offset, u32 new_offset)
2441 {
2442 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2443 state->hw.rotation,
2444 state->color_plane[color_plane].stride,
2445 old_offset, new_offset);
2446 }
2447
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is expressed in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split y/x into whole tiles and intra-tile remainders. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* Fold the alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
2516
2517 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2518 const struct intel_plane_state *state,
2519 int color_plane)
2520 {
2521 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2522 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2523 const struct drm_framebuffer *fb = state->hw.fb;
2524 unsigned int rotation = state->hw.rotation;
2525 int pitch = state->color_plane[color_plane].stride;
2526 u32 alignment;
2527
2528 if (intel_plane->id == PLANE_CURSOR)
2529 alignment = intel_cursor_alignment(dev_priv);
2530 else
2531 alignment = intel_surf_alignment(fb, color_plane);
2532
2533 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2534 pitch, rotation, alignment);
2535 }
2536
/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/*
	 * Pick the required fb->offsets[] alignment: tile-row for gen12
	 * semiplanar UV planes, tile size for other tiled fbs, none for
	 * linear.
	 */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold the byte offset into x/y relative to offset 0. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2584
2585 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2586 {
2587 switch (fb_modifier) {
2588 case I915_FORMAT_MOD_X_TILED:
2589 return I915_TILING_X;
2590 case I915_FORMAT_MOD_Y_TILED:
2591 case I915_FORMAT_MOD_Y_TILED_CCS:
2592 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2593 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2594 return I915_TILING_Y;
2595 default:
2596 return I915_TILING_NONE;
2597 }
2598 }
2599
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 * the cache-line pairs. The compression state of the cache-line pair
 * is specified by 2 bits in the CCS. Each CCS cache-line represents
 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 * cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 *
 * The .hsub/.vsub values below encode exactly that 8x16 ratio for the
 * 1 byte-per-pixel CCS plane relative to the 4 byte main plane.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2624
/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * the main surface.
 *
 * NOTE(review): the CCS planes below express that ratio via
 * .char_per_block/.block_w rather than .hsub/.vsub; the vertical factor is
 * handled separately (see intel_fb_plane_get_subsampling()).
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
2670
2671 static const struct drm_format_info *
2672 lookup_format_info(const struct drm_format_info formats[],
2673 int num_formats, u32 format)
2674 {
2675 int i;
2676
2677 for (i = 0; i < num_formats; i++) {
2678 if (formats[i].format == format)
2679 return &formats[i];
2680 }
2681
2682 return NULL;
2683 }
2684
2685 static const struct drm_format_info *
2686 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2687 {
2688 switch (cmd->modifier[0]) {
2689 case I915_FORMAT_MOD_Y_TILED_CCS:
2690 case I915_FORMAT_MOD_Yf_TILED_CCS:
2691 return lookup_format_info(skl_ccs_formats,
2692 ARRAY_SIZE(skl_ccs_formats),
2693 cmd->pixel_format);
2694 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2695 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2696 return lookup_format_info(gen12_ccs_formats,
2697 ARRAY_SIZE(gen12_ccs_formats),
2698 cmd->pixel_format);
2699 default:
2700 return NULL;
2701 }
2702 }
2703
2704 bool is_ccs_modifier(u64 modifier)
2705 {
2706 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2707 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2708 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2709 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2710 }
2711
2712 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2713 {
2714 return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2715 512) * 64;
2716 }
2717
2718 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2719 u32 pixel_format, u64 modifier)
2720 {
2721 struct intel_crtc *crtc;
2722 struct intel_plane *plane;
2723
2724 /*
2725 * We assume the primary plane for pipe A has
2726 * the highest stride limits of them all,
2727 * if in case pipe A is disabled, use the first pipe from pipe_mask.
2728 */
2729 crtc = intel_get_first_crtc(dev_priv);
2730 if (!crtc)
2731 return 0;
2732
2733 plane = to_intel_plane(crtc->base.primary);
2734
2735 return plane->max_stride(plane, pixel_format, modifier,
2736 DRM_MODE_ROTATE_0);
2737 }
2738
2739 static
2740 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2741 u32 pixel_format, u64 modifier)
2742 {
2743 /*
2744 * Arbitrary limit for gen4+ chosen to match the
2745 * render engine max stride.
2746 *
2747 * The new CCS hash mode makes remapping impossible
2748 */
2749 if (!is_ccs_modifier(modifier)) {
2750 if (INTEL_GEN(dev_priv) >= 7)
2751 return 256*1024;
2752 else if (INTEL_GEN(dev_priv) >= 4)
2753 return 128*1024;
2754 }
2755
2756 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2757 }
2758
/*
 * Return the required stride alignment in bytes for @color_plane.
 * For linear surfaces this depends on whether remapping is feasible;
 * for tiled surfaces it's a (possibly workaround-inflated) multiple of
 * the tile width.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2803
2804 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2805 {
2806 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2807 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2808 const struct drm_framebuffer *fb = plane_state->hw.fb;
2809 int i;
2810
2811 /* We don't want to deal with remapping with cursors */
2812 if (plane->id == PLANE_CURSOR)
2813 return false;
2814
2815 /*
2816 * The display engine limits already match/exceed the
2817 * render engine limits, so not much point in remapping.
2818 * Would also need to deal with the fence POT alignment
2819 * and gen2 2KiB GTT tile size.
2820 */
2821 if (INTEL_GEN(dev_priv) < 4)
2822 return false;
2823
2824 /*
2825 * The new CCS hash mode isn't compatible with remapping as
2826 * the virtual address of the pages affects the compressed data.
2827 */
2828 if (is_ccs_modifier(fb->modifier))
2829 return false;
2830
2831 /* Linear needs a page aligned stride for remapping */
2832 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2833 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2834
2835 for (i = 0; i < fb->format->num_planes; i++) {
2836 if (fb->pitches[i] & alignment)
2837 return false;
2838 }
2839 }
2840
2841 return true;
2842 }
2843
2844 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2845 {
2846 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2847 const struct drm_framebuffer *fb = plane_state->hw.fb;
2848 unsigned int rotation = plane_state->hw.rotation;
2849 u32 stride, max_stride;
2850
2851 /*
2852 * No remapping for invisible planes since we don't have
2853 * an actual source viewport to remap.
2854 */
2855 if (!plane_state->uapi.visible)
2856 return false;
2857
2858 if (!intel_plane_can_remap(plane_state))
2859 return false;
2860
2861 /*
2862 * FIXME: aux plane limits on gen9+ are
2863 * unclear in Bspec, for now no checking.
2864 */
2865 stride = intel_fb_pitch(fb, 0, rotation);
2866 max_stride = plane->max_stride(plane, fb->format->format,
2867 fb->modifier, rotation);
2868
2869 return stride > max_stride;
2870 }
2871
/*
 * Return the horizontal/vertical subsampling factors of @color_plane
 * relative to the fb's first plane. Plane 0 is by definition 1x1;
 * non-CCS planes use the format's hsub/vsub; gen12 CCS planes derive
 * hsub from the block widths and use a fixed vertical factor.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* Fixed vertical factor for gen12 CCS planes. */
	*vsub = 32;
}
/*
 * Validate that a CCS plane's intra-tile x/y offsets match those of its
 * main plane. CCS has no x/y offset register of its own, so a mismatch
 * cannot be programmed; returns -EINVAL in that case, 0 otherwise
 * (including for non-CCS planes).
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dimensions to main surface pixel units. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			    "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			    main_x, main_y,
			    ccs_x, ccs_y,
			    intel_fb->normal[main_plane].x,
			    intel_fb->normal[main_plane].y,
			    x, y);
		return -EINVAL;
	}

	return 0;
}
2958
2959 static void
2960 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2961 {
2962 int main_plane = is_ccs_plane(fb, color_plane) ?
2963 ccs_to_main_plane(fb, color_plane) : 0;
2964 int main_hsub, main_vsub;
2965 int hsub, vsub;
2966
2967 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2968 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2969 *w = fb->width / main_hsub / hsub;
2970 *h = fb->height / main_vsub / vsub;
2971 }
2972
/*
 * Setup the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view.
 *
 * Only Y/Yf tiled surfaces get a rotated view; for all other modifiers this
 * returns 0 and leaves the rotation info untouched. The returned value is
 * the plane's tile count, which the caller accumulates into the rotated
 * GTT offset for subsequent planes.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* Rotated pitch: one row per tile column of the unrotated view. */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* Number of tiles this plane occupies in the rotated mapping. */
	return plane_info->width * plane_info->height;
}
3033
/*
 * Fill in the per-plane layout info (normal and rotated x/y offsets) for a
 * framebuffer and validate that the fb fits inside its backing object.
 *
 * Returns 0 on success or a negative errno on layout/validation failure.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the byte offset of this plane into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		/* CCS planes must stay aligned with their main surface. */
		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		/* Work in tile units from here on. */
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear surface: size in whole tiles, rounded up. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* Reject an fb that would read past the end of its backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
3144
/*
 * Build a remapped/rotated GTT view for a plane whose stride or rotation
 * can't be handled by the normal GTT mapping, and fill in the resulting
 * per-color-plane stride and x/y offsets in the plane state.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	/* Points into plane_state->view; memset below zeroes it first. */
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS modifiers are not expected to take this path. */
	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is never subsampled. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		/* Offset is folded into x/y above; base offset is 0. */
		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3255
/*
 * Compute the GTT view and per-color-plane stride/offset/x/y for a plane,
 * remapping the GTT view if required, and validate the resulting strides.
 *
 * Returns 0 on success or the error from intel_plane_check_stride().
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No fb, nothing to compute. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	/* Normal (non-remapped) path: use the precomputed fb layout. */
	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
3304
3305 static int i9xx_format_to_fourcc(int format)
3306 {
3307 switch (format) {
3308 case DISPPLANE_8BPP:
3309 return DRM_FORMAT_C8;
3310 case DISPPLANE_BGRA555:
3311 return DRM_FORMAT_ARGB1555;
3312 case DISPPLANE_BGRX555:
3313 return DRM_FORMAT_XRGB1555;
3314 case DISPPLANE_BGRX565:
3315 return DRM_FORMAT_RGB565;
3316 default:
3317 case DISPPLANE_BGRX888:
3318 return DRM_FORMAT_XRGB8888;
3319 case DISPPLANE_RGBX888:
3320 return DRM_FORMAT_XBGR8888;
3321 case DISPPLANE_BGRA888:
3322 return DRM_FORMAT_ARGB8888;
3323 case DISPPLANE_RGBA888:
3324 return DRM_FORMAT_ABGR8888;
3325 case DISPPLANE_BGRX101010:
3326 return DRM_FORMAT_XRGB2101010;
3327 case DISPPLANE_RGBX101010:
3328 return DRM_FORMAT_XBGR2101010;
3329 case DISPPLANE_BGRA101010:
3330 return DRM_FORMAT_ARGB2101010;
3331 case DISPPLANE_RGBA101010:
3332 return DRM_FORMAT_ABGR2101010;
3333 case DISPPLANE_RGBX161616:
3334 return DRM_FORMAT_XBGR16161616F;
3335 }
3336 }
3337
3338 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3339 {
3340 switch (format) {
3341 case PLANE_CTL_FORMAT_RGB_565:
3342 return DRM_FORMAT_RGB565;
3343 case PLANE_CTL_FORMAT_NV12:
3344 return DRM_FORMAT_NV12;
3345 case PLANE_CTL_FORMAT_XYUV:
3346 return DRM_FORMAT_XYUV8888;
3347 case PLANE_CTL_FORMAT_P010:
3348 return DRM_FORMAT_P010;
3349 case PLANE_CTL_FORMAT_P012:
3350 return DRM_FORMAT_P012;
3351 case PLANE_CTL_FORMAT_P016:
3352 return DRM_FORMAT_P016;
3353 case PLANE_CTL_FORMAT_Y210:
3354 return DRM_FORMAT_Y210;
3355 case PLANE_CTL_FORMAT_Y212:
3356 return DRM_FORMAT_Y212;
3357 case PLANE_CTL_FORMAT_Y216:
3358 return DRM_FORMAT_Y216;
3359 case PLANE_CTL_FORMAT_Y410:
3360 return DRM_FORMAT_XVYU2101010;
3361 case PLANE_CTL_FORMAT_Y412:
3362 return DRM_FORMAT_XVYU12_16161616;
3363 case PLANE_CTL_FORMAT_Y416:
3364 return DRM_FORMAT_XVYU16161616;
3365 default:
3366 case PLANE_CTL_FORMAT_XRGB_8888:
3367 if (rgb_order) {
3368 if (alpha)
3369 return DRM_FORMAT_ABGR8888;
3370 else
3371 return DRM_FORMAT_XBGR8888;
3372 } else {
3373 if (alpha)
3374 return DRM_FORMAT_ARGB8888;
3375 else
3376 return DRM_FORMAT_XRGB8888;
3377 }
3378 case PLANE_CTL_FORMAT_XRGB_2101010:
3379 if (rgb_order) {
3380 if (alpha)
3381 return DRM_FORMAT_ABGR2101010;
3382 else
3383 return DRM_FORMAT_XBGR2101010;
3384 } else {
3385 if (alpha)
3386 return DRM_FORMAT_ARGB2101010;
3387 else
3388 return DRM_FORMAT_XRGB2101010;
3389 }
3390 case PLANE_CTL_FORMAT_XRGB_16161616F:
3391 if (rgb_order) {
3392 if (alpha)
3393 return DRM_FORMAT_ABGR16161616F;
3394 else
3395 return DRM_FORMAT_XBGR16161616F;
3396 } else {
3397 if (alpha)
3398 return DRM_FORMAT_ARGB16161616F;
3399 else
3400 return DRM_FORMAT_XRGB16161616F;
3401 }
3402 }
3403 }
3404
/*
 * Reconstruct a VMA for the BIOS/GOP-programmed initial framebuffer that
 * lives in stolen memory, pinning it at its original GGTT address so the
 * display keeps scanning out of it. Returns NULL on any failure (the
 * initial fb is best-effort only).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand [base, base+size) to GTT-alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/* Carry over the tiling the BIOS programmed, if any. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Must land at the exact GGTT offset the BIOS used. */
	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled scanout needs a fenceable mapping. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
3465
/*
 * Wrap the BIOS-provided initial framebuffer in a drm/intel framebuffer
 * object. Returns true on success (plane_config->vma is set), false if the
 * modifier is unsupported or reconstruction fails.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	/* Only modifiers the BIOS can plausibly have programmed. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Rebuild the fb creation parameters from the probed config. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
3512
3513 static void
3514 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3515 struct intel_plane_state *plane_state,
3516 bool visible)
3517 {
3518 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3519
3520 plane_state->uapi.visible = visible;
3521
3522 if (visible)
3523 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3524 else
3525 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3526 }
3527
3528 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3529 {
3530 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3531 struct drm_plane *plane;
3532
3533 /*
3534 * Active_planes aliases if multiple "primary" or cursor planes
3535 * have been used on the same (or wrong) pipe. plane_mask uses
3536 * unique ids, hence we can use that to reconstruct active_planes.
3537 */
3538 crtc_state->active_planes = 0;
3539
3540 drm_for_each_plane_mask(plane, &dev_priv->drm,
3541 crtc_state->uapi.plane_mask)
3542 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3543 }
3544
/*
 * Forcibly disable a plane outside the atomic framework (used during
 * initial HW state takeover/sanitization), fixing up the software state
 * to match and honoring the HW ordering quirks around self-refresh and
 * gen2 underrun reporting.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Bring the software bookkeeping in line with the disabled plane. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3589
3590 static struct intel_frontbuffer *
3591 to_intel_frontbuffer(struct drm_framebuffer *fb)
3592 {
3593 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3594 }
3595
/*
 * Take over the BIOS-programmed framebuffer for a crtc's primary plane:
 * either reconstruct an fb object for it, share an fb already claimed by
 * another crtc scanning out the same address, or — failing both — disable
 * the primary plane so software state doesn't claim a visible plane with
 * no fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => same BIOS fb; share it. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Keep the vma pinned and referenced for the plane's lifetime. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-fb src viewport; src coordinates are 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3695
3696 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3697 int color_plane,
3698 unsigned int rotation)
3699 {
3700 int cpp = fb->format->cpp[color_plane];
3701
3702 switch (fb->modifier) {
3703 case DRM_FORMAT_MOD_LINEAR:
3704 case I915_FORMAT_MOD_X_TILED:
3705 /*
3706 * Validated limit is 4k, but has 5k should
3707 * work apart from the following features:
3708 * - Ytile (already limited to 4k)
3709 * - FP16 (already limited to 4k)
3710 * - render compression (already limited to 4k)
3711 * - KVMR sprite and cursor (don't care)
3712 * - horizontal panning (TODO verify this)
3713 * - pipe and plane scaling (TODO verify this)
3714 */
3715 if (cpp == 8)
3716 return 4096;
3717 else
3718 return 5120;
3719 case I915_FORMAT_MOD_Y_TILED_CCS:
3720 case I915_FORMAT_MOD_Yf_TILED_CCS:
3721 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
3722 /* FIXME AUX plane? */
3723 case I915_FORMAT_MOD_Y_TILED:
3724 case I915_FORMAT_MOD_Yf_TILED:
3725 if (cpp == 8)
3726 return 2048;
3727 else
3728 return 4096;
3729 default:
3730 MISSING_CASE(fb->modifier);
3731 return 2048;
3732 }
3733 }
3734
3735 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3736 int color_plane,
3737 unsigned int rotation)
3738 {
3739 int cpp = fb->format->cpp[color_plane];
3740
3741 switch (fb->modifier) {
3742 case DRM_FORMAT_MOD_LINEAR:
3743 case I915_FORMAT_MOD_X_TILED:
3744 if (cpp == 8)
3745 return 4096;
3746 else
3747 return 5120;
3748 case I915_FORMAT_MOD_Y_TILED_CCS:
3749 case I915_FORMAT_MOD_Yf_TILED_CCS:
3750 /* FIXME AUX plane? */
3751 case I915_FORMAT_MOD_Y_TILED:
3752 case I915_FORMAT_MOD_Yf_TILED:
3753 if (cpp == 8)
3754 return 2048;
3755 else
3756 return 5120;
3757 default:
3758 MISSING_CASE(fb->modifier);
3759 return 2048;
3760 }
3761 }
3762
/* Maximum plane source width on ICL+: 5120 for all modifiers and cpp. */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3769
/* Maximum plane source height on SKL/GLK-class hardware. */
static int skl_max_plane_height(void)
{
	return 4096;
}
3774
/* Maximum plane source height on ICL+ hardware. */
static int icl_max_plane_height(void)
{
	return 4320;
}
3779
/*
 * Walk the CCS (AUX) plane's aligned offset backwards until its x/y
 * coordinates coincide with the main surface's x/y, since the CCS plane has
 * no independent x/y registers. On success the plane_state's CCS plane
 * offset/x/y are updated and true is returned; false means no matching
 * offset could be found.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/* Step the AUX offset down one alignment unit at a time. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Adjust in subsampled units, then map back. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* Preserve the sub-sample remainder of the old coordinates. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3824
/*
 * Validate and finalize the main (plane 0) surface layout for SKL+:
 * enforce per-platform size limits, compute an aligned surface offset, and
 * resolve the X-tiling and CCS offset constraints. On success the final
 * offset/x/y are stored in color_plane[0] and the uapi src rect is
 * translated to the final coordinates.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point. */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	/* Per-platform plane size limits. */
	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset down until x fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3931
/*
 * Validate and finalize the UV (chroma) plane layout of a semi-planar YUV
 * (e.g. NV12/P0xx) framebuffer, including the CCS offset constraint when a
 * CCS modifier is in use.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	/*
	 * >>17 = 16.16 fixed point to integer (>>16) combined with the 2x
	 * chroma subsampling (/2) of the UV plane.
	 */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/* UV offset must not exceed its CCS plane's offset. */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/* Walk down until the UV and CCS x/y coordinates agree. */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
3995
/*
 * Compute the offset and x/y coordinates of every CCS plane in the fb,
 * expressing the final x/y in main-surface units (hence the multiply by the
 * combined subsampling factors and the divide back by the main factors).
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* src coordinates are 16.16 fixed point. */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* Total subsampling relative to the full-resolution fb. */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/* Fold the sub-sample remainder back in, in main-plane units. */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
4038
/*
 * Set up the color plane offsets/coordinates for a skl+ plane: first any
 * AUX surfaces (CCS and/or NV12-style chroma), then the main surface,
 * since the main surface setup depends on the AUX results.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool needs_aux = false;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* nothing to compute for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		needs_aux = true;
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		needs_aux = true;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (!needs_aux) {
		int i;

		/*
		 * Fill the unused planes with a non-page-aligned offset —
		 * presumably a poison value to catch accidental use;
		 * TODO confirm the intent of ~0xfff.
		 */
		for (i = 1; i < fb->format->num_planes; i++) {
			plane_state->color_plane[i].offset = ~0xfff;
			plane_state->color_plane[i].x = 0;
			plane_state->color_plane[i].y = 0;
		}
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
4087
4088 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4089 const struct intel_plane_state *plane_state,
4090 unsigned int *num, unsigned int *den)
4091 {
4092 const struct drm_framebuffer *fb = plane_state->hw.fb;
4093 unsigned int cpp = fb->format->cpp[0];
4094
4095 /*
4096 * g4x bspec says 64bpp pixel rate can't exceed 80%
4097 * of cdclk when the sprite plane is enabled on the
4098 * same pipe. ilk/snb bspec says 64bpp pixel rate is
4099 * never allowed to exceed 80% of cdclk. Let's just go
4100 * with the ilk/snb limit always.
4101 */
4102 if (cpp == 8) {
4103 *num = 10;
4104 *den = 8;
4105 } else {
4106 *num = 1;
4107 *den = 1;
4108 }
4109 }
4110
4111 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4112 const struct intel_plane_state *plane_state)
4113 {
4114 unsigned int pixel_rate;
4115 unsigned int num, den;
4116
4117 /*
4118 * Note that crtc_state->pixel_rate accounts for both
4119 * horizontal and vertical panel fitter downscaling factors.
4120 * Pre-HSW bspec tells us to only consider the horizontal
4121 * downscaling factor here. We ignore that and just consider
4122 * both for simplicity.
4123 */
4124 pixel_rate = crtc_state->pixel_rate;
4125
4126 i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4127
4128 /* two pixels per clock with double wide pipe */
4129 if (crtc_state->double_wide)
4130 den *= 2;
4131
4132 return DIV_ROUND_UP(pixel_rate * num, den);
4133 }
4134
4135 unsigned int
4136 i9xx_plane_max_stride(struct intel_plane *plane,
4137 u32 pixel_format, u64 modifier,
4138 unsigned int rotation)
4139 {
4140 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4141
4142 if (!HAS_GMCH(dev_priv)) {
4143 return 32*1024;
4144 } else if (INTEL_GEN(dev_priv) >= 4) {
4145 if (modifier == I915_FORMAT_MOD_X_TILED)
4146 return 16*1024;
4147 else
4148 return 32*1024;
4149 } else if (INTEL_GEN(dev_priv) >= 3) {
4150 if (modifier == I915_FORMAT_MOD_X_TILED)
4151 return 8*1024;
4152 else
4153 return 16*1024;
4154 } else {
4155 if (plane->i9xx_plane == PLANE_C)
4156 return 4*1024;
4157 else
4158 return 8*1024;
4159 }
4160 }
4161
4162 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4163 {
4164 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4165 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4166 u32 dspcntr = 0;
4167
4168 if (crtc_state->gamma_enable)
4169 dspcntr |= DISPPLANE_GAMMA_ENABLE;
4170
4171 if (crtc_state->csc_enable)
4172 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4173
4174 if (INTEL_GEN(dev_priv) < 5)
4175 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4176
4177 return dspcntr;
4178 }
4179
/*
 * Compute the plane-state dependent bits of DSPCNTR for a pre-skl
 * primary plane: pixel format, tiling, rotation/reflection and trickle
 * feed. The crtc-state dependent bits come from i9xx_plane_ctl_crtc().
 *
 * Returns the DSPCNTR value, or 0 for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* these platforms want trickle feed disabled */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* map the DRM fourcc onto the hw pixel format field */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling support in DSPCNTR only exists on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
4252
4253 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
4254 {
4255 struct drm_i915_private *dev_priv =
4256 to_i915(plane_state->uapi.plane->dev);
4257 const struct drm_framebuffer *fb = plane_state->hw.fb;
4258 int src_x, src_y, src_w;
4259 u32 offset;
4260 int ret;
4261
4262 ret = intel_plane_compute_gtt(plane_state);
4263 if (ret)
4264 return ret;
4265
4266 if (!plane_state->uapi.visible)
4267 return 0;
4268
4269 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4270 src_x = plane_state->uapi.src.x1 >> 16;
4271 src_y = plane_state->uapi.src.y1 >> 16;
4272
4273 /* Undocumented hardware limit on i965/g4x/vlv/chv */
4274 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
4275 return -EINVAL;
4276
4277 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
4278
4279 if (INTEL_GEN(dev_priv) >= 4)
4280 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
4281 plane_state, 0);
4282 else
4283 offset = 0;
4284
4285 /*
4286 * Put the final coordinates back so that the src
4287 * coordinate checks will see the right values.
4288 */
4289 drm_rect_translate_to(&plane_state->uapi.src,
4290 src_x << 16, src_y << 16);
4291
4292 /* HSW/BDW do this automagically in hardware */
4293 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
4294 unsigned int rotation = plane_state->hw.rotation;
4295 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4296 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4297
4298 if (rotation & DRM_MODE_ROTATE_180) {
4299 src_x += src_w - 1;
4300 src_y += src_h - 1;
4301 } else if (rotation & DRM_MODE_REFLECT_X) {
4302 src_x += src_w - 1;
4303 }
4304 }
4305
4306 plane_state->color_plane[0].offset = offset;
4307 plane_state->color_plane[0].x = src_x;
4308 plane_state->color_plane[0].y = src_y;
4309
4310 return 0;
4311 }
4312
4313 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4314 {
4315 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4316 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4317
4318 if (IS_CHERRYVIEW(dev_priv))
4319 return i9xx_plane == PLANE_B;
4320 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4321 return false;
4322 else if (IS_GEN(dev_priv, 4))
4323 return i9xx_plane == PLANE_C;
4324 else
4325 return i9xx_plane == PLANE_B ||
4326 i9xx_plane == PLANE_C;
4327 }
4328
/*
 * Atomic check for a pre-skl primary plane: validates rotation and
 * plane position/size, computes the surface offset/coordinates, and
 * precomputes the DSPCNTR value into plane_state->ctl.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	/* chv-specific rotation restrictions — see chv_plane_check_rotation() */
	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* no scaling on these planes; windowing only where the hw has it */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* the remaining checks only apply to a visible plane */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
4364
/*
 * Program a pre-skl primary plane from the precomputed plane/crtc state.
 * All registers are written under the uncore lock, with DSPCNTR written
 * just before the surface address register to arm the update atomically.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* combine the plane-state and crtc-state dependent control bits */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* pre-gen4 has no DSPSURF, so everything goes through DSPADDR */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	/* per-platform offset registers: DSPOFFSET vs. DSPLINOFF/DSPTILEOFF */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4439
/*
 * Disable a pre-skl primary plane, while still programming the
 * crtc-state dependent DSPCNTR bits (see comment below for why).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* disarm: control register first, then clear the surface address */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4470
/*
 * Read back from hardware whether the plane is enabled, returning the
 * pipe it is attached to via *pipe. Returns false if the plane's power
 * domain is off (plane effectively disabled, *pipe left untouched).
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* pre-ilk the pipe selection is encoded in DSPCNTR itself */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4505
4506 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4507 {
4508 struct drm_device *dev = intel_crtc->base.dev;
4509 struct drm_i915_private *dev_priv = to_i915(dev);
4510 unsigned long irqflags;
4511
4512 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4513
4514 intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4515 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4516 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4517
4518 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4519 }
4520
4521 /*
4522 * This function detaches (aka. unbinds) unused scalers in hardware
4523 */
4524 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4525 {
4526 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4527 const struct intel_crtc_scaler_state *scaler_state =
4528 &crtc_state->scaler_state;
4529 int i;
4530
4531 /* loop through and disable scalers that aren't in use */
4532 for (i = 0; i < intel_crtc->num_scalers; i++) {
4533 if (!scaler_state->scalers[i].in_use)
4534 skl_detach_scaler(intel_crtc, i);
4535 }
4536 }
4537
/*
 * Unit in which the hw expresses the plane stride: 64-byte chunks for
 * linear buffers, otherwise tiles (tile height for rotated mappings,
 * tile row width in bytes for normal ones).
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	return drm_rotation_90_or_270(rotation) ?
		intel_tile_height(fb, color_plane) :
		intel_tile_width_bytes(fb, color_plane);
}
4552
4553 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4554 int color_plane)
4555 {
4556 const struct drm_framebuffer *fb = plane_state->hw.fb;
4557 unsigned int rotation = plane_state->hw.rotation;
4558 u32 stride = plane_state->color_plane[color_plane].stride;
4559
4560 if (color_plane >= fb->format->num_planes)
4561 return 0;
4562
4563 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4564 }
4565
/*
 * Translate a DRM fourcc pixel format into the PLANE_CTL format (and,
 * where applicable, channel order) field value. Returns 0 and warns
 * for formats this mapping does not know about.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_XYUV8888:
		return PLANE_CTL_FORMAT_XYUV;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4627
4628 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4629 {
4630 if (!plane_state->hw.fb->format->has_alpha)
4631 return PLANE_CTL_ALPHA_DISABLE;
4632
4633 switch (plane_state->hw.pixel_blend_mode) {
4634 case DRM_MODE_BLEND_PIXEL_NONE:
4635 return PLANE_CTL_ALPHA_DISABLE;
4636 case DRM_MODE_BLEND_PREMULTI:
4637 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4638 case DRM_MODE_BLEND_COVERAGE:
4639 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4640 default:
4641 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4642 return PLANE_CTL_ALPHA_DISABLE;
4643 }
4644 }
4645
4646 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4647 {
4648 if (!plane_state->hw.fb->format->has_alpha)
4649 return PLANE_COLOR_ALPHA_DISABLE;
4650
4651 switch (plane_state->hw.pixel_blend_mode) {
4652 case DRM_MODE_BLEND_PIXEL_NONE:
4653 return PLANE_COLOR_ALPHA_DISABLE;
4654 case DRM_MODE_BLEND_PREMULTI:
4655 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4656 case DRM_MODE_BLEND_COVERAGE:
4657 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4658 default:
4659 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4660 return PLANE_COLOR_ALPHA_DISABLE;
4661 }
4662 }
4663
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling and
 * decompression enable bits. Returns 0 (and warns) for unknown
 * modifiers; linear needs no bits set.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		/* gen12 render compression without fast clear color */
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4691
4692 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4693 {
4694 switch (rotate) {
4695 case DRM_MODE_ROTATE_0:
4696 break;
4697 /*
4698 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4699 * while i915 HW rotation is clockwise, thats why this swapping.
4700 */
4701 case DRM_MODE_ROTATE_90:
4702 return PLANE_CTL_ROTATE_270;
4703 case DRM_MODE_ROTATE_180:
4704 return PLANE_CTL_ROTATE_180;
4705 case DRM_MODE_ROTATE_270:
4706 return PLANE_CTL_ROTATE_90;
4707 default:
4708 MISSING_CASE(rotate);
4709 }
4710
4711 return 0;
4712 }
4713
4714 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4715 {
4716 switch (reflect) {
4717 case 0:
4718 break;
4719 case DRM_MODE_REFLECT_X:
4720 return PLANE_CTL_FLIP_HORIZONTAL;
4721 case DRM_MODE_REFLECT_Y:
4722 default:
4723 MISSING_CASE(reflect);
4724 }
4725
4726 return 0;
4727 }
4728
4729 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4730 {
4731 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4732 u32 plane_ctl = 0;
4733
4734 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4735 return plane_ctl;
4736
4737 if (crtc_state->gamma_enable)
4738 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4739
4740 if (crtc_state->csc_enable)
4741 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4742
4743 return plane_ctl;
4744 }
4745
/*
 * Compute the plane-state dependent bits of PLANE_CTL for a skl+
 * universal plane: format, tiling, rotation/flip, color keying and
 * (pre-glk) alpha blending plus YCbCr conversion settings. The
 * crtc-state dependent bits come from skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* on glk+ the alpha/gamma/CSC controls moved to PLANE_COLOR_CTL */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* horizontal flip support starts on cnl */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4784
4785 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4786 {
4787 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4788 u32 plane_color_ctl = 0;
4789
4790 if (INTEL_GEN(dev_priv) >= 11)
4791 return plane_color_ctl;
4792
4793 if (crtc_state->gamma_enable)
4794 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4795
4796 if (crtc_state->csc_enable)
4797 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4798
4799 return plane_color_ctl;
4800 }
4801
/*
 * Compute the plane-state dependent bits of PLANE_COLOR_CTL (glk+):
 * alpha blend mode and, for YUV formats, the input CSC configuration.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	/*
	 * Non-HDR planes use one of the fixed-function YUV->RGB CSC modes;
	 * HDR planes on icl+ presumably use the explicit input CSC instead
	 * (see icl_is_hdr_plane()) — TODO confirm.
	 */
	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4828
/*
 * Re-program the display hw state and, if @state is non-NULL, commit
 * the atomic state that was duplicated before reset/suspend.
 *
 * Returns 0 on success or a negative error code; -EDEADLK would
 * indicate a locking bug, hence the WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
4867
4868 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4869 {
4870 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4871 intel_has_gpu_reset(&dev_priv->gt));
4872 }
4873
/*
 * Quiesce the display before a GPU reset that clobbers it: take all
 * modeset locks, duplicate the current atomic state (restored later by
 * intel_finish_reset()) and disable all crtcs.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* standard drm_modeset backoff loop to resolve lock contention */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* stashed for intel_finish_reset() to restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4933
/*
 * Counterpart of intel_prepare_reset(): re-initialize the display if
 * the reset clobbered it, restore the saved atomic state and drop the
 * modeset locks taken during prepare.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* re-arm hotplug interrupts before restoring state */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4986
4987 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4988 {
4989 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4990 enum pipe pipe = crtc->pipe;
4991 u32 tmp;
4992
4993 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
4994
4995 /*
4996 * Display WA #1153: icl
4997 * enable hardware to bypass the alpha math
4998 * and rounding for per-pixel values 00 and 0xff
4999 */
5000 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
5001 /*
5002 * Display WA # 1605353570: icl
5003 * Set the pixel rounding bit to 1 for allowing
5004 * passthrough of Frame buffer pixels unmodified
5005 * across pipe
5006 */
5007 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
5008 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
5009 }
5010
/*
 * Switch the FDI TX/RX link of @crtc's pipe from a training pattern to
 * the normal pixel-data pattern, with enhanced framing enabled.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	/* RX side uses different field encodings on CPT PCHs */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
5051
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-stage FDI link training sequence (pattern 1 for bit lock,
 * pattern 2 for symbol lock), polling FDI_RX_IIR for the lock bits and
 * acknowledging them by writing them back.  Failures are logged but not
 * propagated to the caller.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; ack it by writing the bit back to the IIR. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2: switch both ends to pattern 2 and poll for symbol lock. */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
5147
/*
 * FDI TX voltage-swing / pre-emphasis combinations tried in order by the
 * SNB and IVB-manual link training loops (see FDI_LINK_TRAIN_VOL_EMP_MASK).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
5154
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-stage sequence as ILK (pattern 1 -> bit lock, pattern 2 ->
 * symbol lock), but each stage additionally steps through the
 * snb_b_fdi_train_param[] voltage/emphasis table, retrying the IIR poll
 * up to 5 times per entry before moving to the next one.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through the vswing/emphasis table until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the lock bit by writing it back */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Same table walk as train 1, but waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5291
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Tries each snb_b_fdi_train_param[] vswing/pre-emphasis setting twice
 * (outer loop runs 2 * ARRAY_SIZE iterations, indexing with j/2).  For
 * each attempt the link is fully disabled and re-enabled, then pattern 1
 * (bit lock) and pattern 2 (symbol lock) are polled on FDI_RX_IIR.  A
 * train-1 failure retries with the next setting; train-2 success jumps
 * straight out via the train_done label.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* re-read once in case the bit set just after the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5417
/*
 * Enable the FDI PLLs for the given crtc state: bring up the PCH FDI RX
 * PLL (programming lane count and the pipe's BPC into FDI_RX_CTL), switch
 * the RX from rawclk to PCDclk, then make sure the CPU FDI TX PLL is on.
 * Each step is followed by a posting read plus the required warmup delay.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's BPC (PIPECONF bits 7:5) into FDI RX bits 18:16 */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
5454
/*
 * Disable the FDI PLLs for @intel_crtc's pipe, reversing
 * ilk_fdi_pll_enable(): switch RX back to rawclk, turn off the CPU FDI
 * TX PLL, then the PCH FDI RX PLL, waiting for the clocks to settle.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5484
/*
 * Disable the FDI link for @crtc's pipe: turn off CPU FDI TX and PCH FDI
 * RX, apply the IBX clock-pointer workaround, and leave both ends parked
 * on training pattern 1 with the RX BPC kept consistent with PIPECONF.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	/* keep FDI RX BPC (bits 18:16) in sync with PIPECONF bits 7:5 */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5536
/*
 * Check whether any CRTC still has an atomic commit whose cleanup (and
 * hence framebuffer unpin) has not completed.  For the first CRTC found
 * with pending cleanup, wait one vblank (to give the cleanup a chance to
 * make progress) and return true; return false if all CRTCs are clean.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* commit_list is protected by the per-crtc commit_lock */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}
5561
5562 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5563 {
5564 u32 temp;
5565
5566 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
5567
5568 mutex_lock(&dev_priv->sb_lock);
5569
5570 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5571 temp |= SBI_SSCCTL_DISABLE;
5572 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5573
5574 mutex_unlock(&dev_priv->sb_lock);
5575 }
5576
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* iCLKIP must be disabled while its divisors are reprogrammed. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* split the divisor into integer (divsel) and phase parts */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All sideband accesses below are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5652
/*
 * Read back the currently programmed iCLKIP frequency (in kHz) by
 * inverting the divisor computation done in lpt_program_iclkip().
 * Returns 0 if the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* inverse of divsel/phaseinc split in lpt_program_iclkip() */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5689
/*
 * Copy the CPU transcoder's timing registers (h/v total, blank, sync and
 * vsyncshift) into the corresponding PCH transcoder registers so both
 * sides of the FDI link run with identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5713
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already matches @enable.  Warns if either FDI B or
 * FDI C RX is still enabled, since the bit must not be flipped while
 * the lanes are in use.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5738
5739 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5740 {
5741 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5742 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5743
5744 switch (crtc->pipe) {
5745 case PIPE_A:
5746 break;
5747 case PIPE_B:
5748 if (crtc_state->fdi_lanes > 2)
5749 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5750 else
5751 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5752
5753 break;
5754 case PIPE_C:
5755 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5756
5757 break;
5758 default:
5759 BUG();
5760 }
5761 }
5762
5763 /*
5764 * Finds the encoder associated with the given CRTC. This can only be
5765 * used when we know that the CRTC isn't feeding multiple encoders!
5766 */
5767 static struct intel_encoder *
5768 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5769 const struct intel_crtc_state *crtc_state)
5770 {
5771 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5772 const struct drm_connector_state *connector_state;
5773 const struct drm_connector *connector;
5774 struct intel_encoder *encoder = NULL;
5775 int num_encoders = 0;
5776 int i;
5777
5778 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5779 if (connector_state->crtc != &crtc->base)
5780 continue;
5781
5782 encoder = to_intel_encoder(connector_state->best_encoder);
5783 num_encoders++;
5784 }
5785
5786 drm_WARN(encoder->base.dev, num_encoders != 1,
5787 "%d encoders for pipe %c\n",
5788 num_encoders, pipe_name(crtc->pipe));
5789
5790 return encoder;
5791 }
5792
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* route this pipe's transcoder to PLL A or PLL B as needed */
		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		/* extract the pipe's BPC from PIPECONF bits 7:5 */
		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = intel_de_read(dev_priv, reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		intel_de_write(dev_priv, reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}
5884
/*
 * Enable the LPT PCH transcoder for @crtc_state: program the iCLKIP
 * clock, copy the CPU transcoder timings to the (single, pipe-A-indexed)
 * PCH transcoder, then enable it.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* LPT has only one PCH transcoder, addressed as PIPE_A */
	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5900
/*
 * Sanity-check after a CPT modeset: the pipe's scanline counter
 * (PIPEDSL) should be advancing.  Sample it, wait, and poll (twice, up
 * to 5ms each) for the value to change; log an error if it stays stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
5916
5917 /*
5918 * The hardware phase 0.0 refers to the center of the pixel.
5919 * We want to start from the top/left edge which is phase
5920 * -0.5. That matches how the hardware calculates the scaling
5921 * factors (from top-left of the first pixel to bottom-right
5922 * of the last pixel, as opposed to the pixel centers).
5923 *
5924 * For 4:2:0 subsampled chroma planes we obviously have to
5925 * adjust that so that the chroma sample position lands in
5926 * the right spot.
5927 *
5928 * Note that for packed YCbCr 4:2:2 formats there is no way to
5929 * control chroma siting. The hardware simply replicates the
5930 * chroma samples for both of the luma samples, and thus we don't
5931 * actually get the expected MPEG2 chroma siting convention :(
5932 * The same behaviour is observed on pre-SKL platforms as well.
5933 *
5934 * Theory behind the formula (note that we ignore sub-pixel
5935 * source coordinates):
5936 * s = source sample position
5937 * d = destination sample position
5938 *
5939 * Downscaling 4:1:
5940 * -0.5
5941 * | 0.0
5942 * | | 1.5 (initial phase)
5943 * | | |
5944 * v v v
5945 * | s | s | s | s |
5946 * | d |
5947 *
5948 * Upscaling 1:4:
5949 * -0.5
5950 * | -0.375 (initial phase)
5951 * | | 0.0
5952 * | | |
5953 * v v v
5954 * | s |
5955 * | d | d | d | d |
5956 */
5957 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5958 {
5959 int phase = -0x8000;
5960 u16 trip = 0;
5961
5962 if (chroma_cosited)
5963 phase += (sub - 1) * 0x8000 / sub;
5964
5965 phase += scale / (2 * sub);
5966
5967 /*
5968 * Hardware initial phase limited to [-0.5:1.5].
5969 * Since the max hardware scale factor is 3.0, we
5970 * should never actually excdeed 1.0 here.
5971 */
5972 WARN_ON(phase < -0x8000 || phase > 0x18000);
5973
5974 if (phase < 0)
5975 phase = 0x10000 + phase;
5976 else
5977 trip = PS_PHASE_TRIP;
5978
5979 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5980 }
5981
/* Source/destination size limits for the SKL+ pipe/plane scalers; ICL
 * raises the maximum width, and planar (4:2:0) YUV has larger minimums. */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
5996
/*
 * skl_update_scaler - stage the allocation or release of one scaler user
 * @crtc_state: crtc state to track the scaler request in
 * @force_detach: release the scaler regardless of scaling need
 * @scaler_user: index identifying this user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out; currently assigned scaler id for this user, or -1
 * @src_w/@src_h/@dst_w/@dst_h: source and destination dimensions
 * @format: framebuffer format, or NULL for pipe (panel fitter) scaling
 * @modifier: framebuffer modifier (only meaningful with @format)
 * @need_scaler: caller already determined a scaler is needed
 *
 * Only bookkeeping in @crtc_state is updated here; the actual scaler
 * registers are programmed later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the request can't be satisfied.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID (interlaced fetch) mode
	 * on GEN9+.
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * If the plane is being disabled, scaling is no longer required,
	 * or detach is forced:
	 *  - free the scaler bound to this plane/crtc
	 *  - to do this, update crtc_state's scaler_users bookkeeping
	 *
	 * Here the scaler state in crtc_state is only marked free so that
	 * the scaler can be assigned to another user.  The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Semiplanar YUV sources have a larger minimum size. */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks: ICL_* limits on gen11+, SKL_* below */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
6091
6092 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
6093 {
6094 const struct drm_display_mode *adjusted_mode =
6095 &crtc_state->hw.adjusted_mode;
6096 int width, height;
6097
6098 if (crtc_state->pch_pfit.enabled) {
6099 width = drm_rect_width(&crtc_state->pch_pfit.dst);
6100 height = drm_rect_height(&crtc_state->pch_pfit.dst);
6101 } else {
6102 width = adjusted_mode->crtc_hdisplay;
6103 height = adjusted_mode->crtc_vdisplay;
6104 }
6105
6106 return skl_update_scaler(crtc_state, !crtc_state->hw.active,
6107 SKL_CRTC_INDEX,
6108 &crtc_state->scaler_state.scaler_id,
6109 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
6110 width, height, NULL, 0,
6111 crtc_state->pch_pfit.enabled);
6112 }
6113
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	/* No fb or invisible plane -> release any scaler it holds. */
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	/* Done if staging failed or no scaler ended up assigned. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] scaling with color key not allowed",
			    intel_plane->base.base.id,
			    intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format: only these formats can be scaled. */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_XYUV8888:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* FP16 scaling is only supported on gen11+ */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			    intel_plane->base.base.id, intel_plane->base.name,
			    fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
6206
6207 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6208 {
6209 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6210 int i;
6211
6212 for (i = 0; i < crtc->num_scalers; i++)
6213 skl_detach_scaler(crtc, i);
6214 }
6215
/*
 * skl_pfit_enable - program the SKL+ panel fitter via a pipe scaler
 * @crtc_state: state of the crtc whose panel fitter to enable
 *
 * Computes the scale factors/phases from the pipe source size vs. the
 * panel fitter destination rectangle and writes the assigned pipe
 * scaler's control, phase, window position and size registers.  No-op
 * when the panel fitter is disabled in @crtc_state.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	/* Source rect in 16.16 fixed point, hence the << 16 */
	struct drm_rect src = {
		.x2 = crtc_state->pipe_src_w << 16,
		.y2 = crtc_state->pipe_src_h << 16,
	};
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	unsigned long irqflags;
	int id;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* A scaler must have been assigned for the panel fitter by now. */
	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	/*
	 * _fw register accesses don't take the uncore lock themselves,
	 * so hold it across the whole programming sequence.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
6267
/*
 * ilk_pfit_enable - program the ILK-style (PF_*) panel fitter
 * @crtc_state: state of the crtc whose panel fitter to enable
 *
 * Writes the panel fitter control, window position and window size
 * registers for the crtc's pipe.  No-op when the panel fitter is
 * disabled in @crtc_state.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	/* IVB/HSW additionally need the pipe selected in PF_CTL. */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
6295
/*
 * hsw_enable_ips - enable IPS if the crtc state wants it
 * @crtc_state: state of the crtc to enable IPS on
 *
 * On BDW the enable goes through the pcode mailbox; on other platforms
 * (HSW) via the IPS_CTL register, waiting for the hardware to confirm.
 * Must only be called after a plane is enabled and a vblank has passed.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
6332
/*
 * hsw_disable_ips - disable IPS if the crtc state has it enabled
 * @crtc_state: state of the crtc to disable IPS on
 *
 * Mirrors hsw_enable_ips(): BDW disables via the pcode mailbox and
 * waits for IPS_CTL to clear, HSW writes IPS_CTL directly.  Ends with
 * a vblank wait, required before any plane can be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6361
6362 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6363 {
6364 if (intel_crtc->overlay)
6365 (void) intel_overlay_switch_off(intel_crtc->overlay);
6366
6367 /* Let userspace switch the overlay on again. In most cases userspace
6368 * has to recompute where to put it anyway.
6369 */
6370 }
6371
6372 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6373 const struct intel_crtc_state *new_crtc_state)
6374 {
6375 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6376 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6377
6378 if (!old_crtc_state->ips_enabled)
6379 return false;
6380
6381 if (needs_modeset(new_crtc_state))
6382 return true;
6383
6384 /*
6385 * Workaround : Do not read or write the pipe palette/gamma data while
6386 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6387 *
6388 * Disable IPS before we program the LUT.
6389 */
6390 if (IS_HASWELL(dev_priv) &&
6391 (new_crtc_state->uapi.color_mgmt_changed ||
6392 new_crtc_state->update_pipe) &&
6393 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6394 return true;
6395
6396 return !new_crtc_state->ips_enabled;
6397 }
6398
6399 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6400 const struct intel_crtc_state *new_crtc_state)
6401 {
6402 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6403 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6404
6405 if (!new_crtc_state->ips_enabled)
6406 return false;
6407
6408 if (needs_modeset(new_crtc_state))
6409 return true;
6410
6411 /*
6412 * Workaround : Do not read or write the pipe palette/gamma data while
6413 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6414 *
6415 * Re-enable IPS after the LUT has been programmed.
6416 */
6417 if (IS_HASWELL(dev_priv) &&
6418 (new_crtc_state->uapi.color_mgmt_changed ||
6419 new_crtc_state->update_pipe) &&
6420 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6421 return true;
6422
6423 /*
6424 * We can't read out IPS on broadwell, assume the worst and
6425 * forcibly enable IPS on the first fastset.
6426 */
6427 if (new_crtc_state->update_pipe &&
6428 old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6429 return true;
6430
6431 return !old_crtc_state->ips_enabled;
6432 }
6433
6434 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6435 {
6436 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6437
6438 if (!crtc_state->nv12_planes)
6439 return false;
6440
6441 /* WA Display #0827: Gen9:all */
6442 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6443 return true;
6444
6445 return false;
6446 }
6447
6448 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6449 {
6450 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6451
6452 /* Wa_2006604312:icl,ehl */
6453 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
6454 return true;
6455
6456 return false;
6457 }
6458
6459 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6460 const struct intel_crtc_state *new_crtc_state)
6461 {
6462 return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6463 new_crtc_state->active_planes;
6464 }
6465
6466 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6467 const struct intel_crtc_state *new_crtc_state)
6468 {
6469 return old_crtc_state->active_planes &&
6470 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6471 }
6472
/*
 * intel_post_plane_update - per-crtc work after the planes are updated
 * @state: the atomic state being committed
 * @crtc: the crtc whose planes were updated
 *
 * Counterpart of intel_pre_plane_update(): flips frontbuffer tracking,
 * updates post-update watermarks, re-enables IPS, and tears down the
 * Display #0827 / Wa_2006604312 workarounds once no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA 827: disable once the NV12 planes are gone */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: disable once no scaler is in use */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6501
/*
 * intel_pre_plane_update - per-crtc work before the planes are updated
 * @state: the atomic state being committed
 * @crtc: the crtc whose planes are about to be updated
 *
 * Disables IPS/FBC where required, arms the Display #0827 and
 * Wa_2006604312 workarounds, handles the cxsr/LP-watermark vblank
 * waits, programs intermediate watermarks, and suppresses gen2
 * underrun reporting when all planes are about to be disabled.
 * The ordering of these steps is deliberate — see the comments below.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank before planes change */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6588
6589 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6590 struct intel_crtc *crtc)
6591 {
6592 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6593 const struct intel_crtc_state *new_crtc_state =
6594 intel_atomic_get_new_crtc_state(state, crtc);
6595 unsigned int update_mask = new_crtc_state->update_planes;
6596 const struct intel_plane_state *old_plane_state;
6597 struct intel_plane *plane;
6598 unsigned fb_bits = 0;
6599 int i;
6600
6601 intel_crtc_dpms_overlay_disable(crtc);
6602
6603 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6604 if (crtc->pipe != plane->pipe ||
6605 !(update_mask & BIT(plane->id)))
6606 continue;
6607
6608 intel_disable_plane(plane, new_crtc_state);
6609
6610 if (old_plane_state->uapi.visible)
6611 fb_bits |= plane->frontbuffer_bit;
6612 }
6613
6614 intel_frontbuffer_flip(dev_priv, fb_bits);
6615 }
6616
6617 /*
6618 * intel_connector_primary_encoder - get the primary encoder for a connector
6619 * @connector: connector for which to return the encoder
6620 *
6621 * Returns the primary encoder for a connector. There is a 1:1 mapping from
6622 * all connectors to their encoder, except for DP-MST connectors which have
6623 * both a virtual and a primary encoder. These DP-MST primary encoders can be
6624 * pointed to by as many DP-MST connectors as there are pipes.
6625 */
6626 static struct intel_encoder *
6627 intel_connector_primary_encoder(struct intel_connector *connector)
6628 {
6629 struct intel_encoder *encoder;
6630
6631 if (connector->mst_port)
6632 return &dp_to_dig_port(connector->mst_port)->base;
6633
6634 encoder = intel_attached_encoder(connector);
6635 drm_WARN_ON(connector->base.dev, !encoder);
6636
6637 return encoder;
6638 }
6639
6640 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6641 {
6642 struct drm_connector_state *new_conn_state;
6643 struct drm_connector *connector;
6644 int i;
6645
6646 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6647 i) {
6648 struct intel_connector *intel_connector;
6649 struct intel_encoder *encoder;
6650 struct intel_crtc *crtc;
6651
6652 if (!intel_connector_needs_modeset(state, connector))
6653 continue;
6654
6655 intel_connector = to_intel_connector(connector);
6656 encoder = intel_connector_primary_encoder(intel_connector);
6657 if (!encoder->update_prepare)
6658 continue;
6659
6660 crtc = new_conn_state->crtc ?
6661 to_intel_crtc(new_conn_state->crtc) : NULL;
6662 encoder->update_prepare(state, encoder, crtc);
6663 }
6664 }
6665
6666 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6667 {
6668 struct drm_connector_state *new_conn_state;
6669 struct drm_connector *connector;
6670 int i;
6671
6672 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6673 i) {
6674 struct intel_connector *intel_connector;
6675 struct intel_encoder *encoder;
6676 struct intel_crtc *crtc;
6677
6678 if (!intel_connector_needs_modeset(state, connector))
6679 continue;
6680
6681 intel_connector = to_intel_connector(connector);
6682 encoder = intel_connector_primary_encoder(intel_connector);
6683 if (!encoder->update_complete)
6684 continue;
6685
6686 crtc = new_conn_state->crtc ?
6687 to_intel_crtc(new_conn_state->crtc) : NULL;
6688 encoder->update_complete(state, encoder, crtc);
6689 }
6690 }
6691
6692 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6693 struct intel_crtc *crtc)
6694 {
6695 const struct intel_crtc_state *crtc_state =
6696 intel_atomic_get_new_crtc_state(state, crtc);
6697 const struct drm_connector_state *conn_state;
6698 struct drm_connector *conn;
6699 int i;
6700
6701 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6702 struct intel_encoder *encoder =
6703 to_intel_encoder(conn_state->best_encoder);
6704
6705 if (conn_state->crtc != &crtc->base)
6706 continue;
6707
6708 if (encoder->pre_pll_enable)
6709 encoder->pre_pll_enable(state, encoder,
6710 crtc_state, conn_state);
6711 }
6712 }
6713
6714 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6715 struct intel_crtc *crtc)
6716 {
6717 const struct intel_crtc_state *crtc_state =
6718 intel_atomic_get_new_crtc_state(state, crtc);
6719 const struct drm_connector_state *conn_state;
6720 struct drm_connector *conn;
6721 int i;
6722
6723 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6724 struct intel_encoder *encoder =
6725 to_intel_encoder(conn_state->best_encoder);
6726
6727 if (conn_state->crtc != &crtc->base)
6728 continue;
6729
6730 if (encoder->pre_enable)
6731 encoder->pre_enable(state, encoder,
6732 crtc_state, conn_state);
6733 }
6734 }
6735
6736 static void intel_encoders_enable(struct intel_atomic_state *state,
6737 struct intel_crtc *crtc)
6738 {
6739 const struct intel_crtc_state *crtc_state =
6740 intel_atomic_get_new_crtc_state(state, crtc);
6741 const struct drm_connector_state *conn_state;
6742 struct drm_connector *conn;
6743 int i;
6744
6745 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6746 struct intel_encoder *encoder =
6747 to_intel_encoder(conn_state->best_encoder);
6748
6749 if (conn_state->crtc != &crtc->base)
6750 continue;
6751
6752 if (encoder->enable)
6753 encoder->enable(state, encoder,
6754 crtc_state, conn_state);
6755 intel_opregion_notify_encoder(encoder, true);
6756 }
6757 }
6758
6759 static void intel_encoders_disable(struct intel_atomic_state *state,
6760 struct intel_crtc *crtc)
6761 {
6762 const struct intel_crtc_state *old_crtc_state =
6763 intel_atomic_get_old_crtc_state(state, crtc);
6764 const struct drm_connector_state *old_conn_state;
6765 struct drm_connector *conn;
6766 int i;
6767
6768 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6769 struct intel_encoder *encoder =
6770 to_intel_encoder(old_conn_state->best_encoder);
6771
6772 if (old_conn_state->crtc != &crtc->base)
6773 continue;
6774
6775 intel_opregion_notify_encoder(encoder, false);
6776 if (encoder->disable)
6777 encoder->disable(state, encoder,
6778 old_crtc_state, old_conn_state);
6779 }
6780 }
6781
6782 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6783 struct intel_crtc *crtc)
6784 {
6785 const struct intel_crtc_state *old_crtc_state =
6786 intel_atomic_get_old_crtc_state(state, crtc);
6787 const struct drm_connector_state *old_conn_state;
6788 struct drm_connector *conn;
6789 int i;
6790
6791 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6792 struct intel_encoder *encoder =
6793 to_intel_encoder(old_conn_state->best_encoder);
6794
6795 if (old_conn_state->crtc != &crtc->base)
6796 continue;
6797
6798 if (encoder->post_disable)
6799 encoder->post_disable(state, encoder,
6800 old_crtc_state, old_conn_state);
6801 }
6802 }
6803
6804 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6805 struct intel_crtc *crtc)
6806 {
6807 const struct intel_crtc_state *old_crtc_state =
6808 intel_atomic_get_old_crtc_state(state, crtc);
6809 const struct drm_connector_state *old_conn_state;
6810 struct drm_connector *conn;
6811 int i;
6812
6813 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6814 struct intel_encoder *encoder =
6815 to_intel_encoder(old_conn_state->best_encoder);
6816
6817 if (old_conn_state->crtc != &crtc->base)
6818 continue;
6819
6820 if (encoder->post_pll_disable)
6821 encoder->post_pll_disable(state, encoder,
6822 old_crtc_state, old_conn_state);
6823 }
6824 }
6825
6826 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6827 struct intel_crtc *crtc)
6828 {
6829 const struct intel_crtc_state *crtc_state =
6830 intel_atomic_get_new_crtc_state(state, crtc);
6831 const struct drm_connector_state *conn_state;
6832 struct drm_connector *conn;
6833 int i;
6834
6835 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6836 struct intel_encoder *encoder =
6837 to_intel_encoder(conn_state->best_encoder);
6838
6839 if (conn_state->crtc != &crtc->base)
6840 continue;
6841
6842 if (encoder->update_pipe)
6843 encoder->update_pipe(state, encoder,
6844 crtc_state, conn_state);
6845 }
6846 }
6847
6848 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6849 {
6850 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6851 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6852
6853 plane->disable_plane(plane, crtc_state);
6854 }
6855
/*
 * ilk_crtc_enable - full crtc enable sequence for ILK-style platforms
 * @state: the atomic state being committed
 * @crtc: the crtc to enable
 *
 * Brings up PLLs, pipe timings, panel fitter, LUTs, the pipe itself,
 * the PCH link (when there is a PCH encoder) and finally the encoders.
 * The ordering of the steps is dictated by the hardware; do not
 * reorder without consulting the comments below.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6947
6948 /* IPS only exists on ULT machines and is tied to pipe A. */
6949 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6950 {
6951 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6952 }
6953
6954 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6955 enum pipe pipe, bool apply)
6956 {
6957 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6958 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6959
6960 if (apply)
6961 val |= mask;
6962 else
6963 val &= ~mask;
6964
6965 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6966 }
6967
6968 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6969 {
6970 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6971 enum pipe pipe = crtc->pipe;
6972 u32 val;
6973
6974 val = MBUS_DBOX_A_CREDIT(2);
6975
6976 if (INTEL_GEN(dev_priv) >= 12) {
6977 val |= MBUS_DBOX_BW_CREDIT(2);
6978 val |= MBUS_DBOX_B_CREDIT(12);
6979 } else {
6980 val |= MBUS_DBOX_BW_CREDIT(1);
6981 val |= MBUS_DBOX_B_CREDIT(8);
6982 }
6983
6984 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6985 }
6986
6987 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6988 {
6989 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6990 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6991
6992 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6993 HSW_LINETIME(crtc_state->linetime) |
6994 HSW_IPS_LINETIME(crtc_state->ips_linetime));
6995 }
6996
6997 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6998 {
6999 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7000 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7001 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
7002 u32 val;
7003
7004 val = intel_de_read(dev_priv, reg);
7005 val &= ~HSW_FRAME_START_DELAY_MASK;
7006 val |= HSW_FRAME_START_DELAY(0);
7007 intel_de_write(dev_priv, reg, val);
7008 }
7009
/*
 * Full modeset enable sequence for HSW+ (DDI) crtcs.
 *
 * The ordering of the steps below follows the hardware enable sequence
 * and must not be changed casually: PLL before encoders' pre_enable,
 * timings/pipeconf before the pipe runs, LUTs loaded with clocks on
 * but before the pipe is enabled, etc.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	intel_encoders_enable(state, crtc);

	/* WA required a vblank between pfit enable and re-enabling gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
7101
/* Disable the ILK-style panel fitter by zeroing its control/window regs. */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
7117
/*
 * Full modeset disable sequence for ILK-family (PCH) crtcs.
 * The step order mirrors the hardware disable sequence: encoders off,
 * vblank off, pipe off, pfit off, FDI off, then PCH transcoder/PLL.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-enable the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7174
/*
 * Crtc disable hook for HSW+ (DDI): most of the teardown happens in the
 * encoders' disable/post_disable hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
7185
/* Enable the GMCH panel fitter with the precomputed ratios/control value. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the state doesn't use the fitter. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be programmed before the control/enable bit. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
7210
7211 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7212 {
7213 if (phy == PHY_NONE)
7214 return false;
7215
7216 if (IS_ELKHARTLAKE(dev_priv))
7217 return phy <= PHY_C;
7218
7219 if (INTEL_GEN(dev_priv) >= 11)
7220 return phy <= PHY_B;
7221
7222 return false;
7223 }
7224
7225 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7226 {
7227 if (INTEL_GEN(dev_priv) >= 12)
7228 return phy >= PHY_D && phy <= PHY_I;
7229
7230 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
7231 return phy >= PHY_C && phy <= PHY_F;
7232
7233 return false;
7234 }
7235
7236 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
7237 {
7238 if (IS_ELKHARTLAKE(i915) && port == PORT_D)
7239 return PHY_A;
7240
7241 return (enum phy)port;
7242 }
7243
7244 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7245 {
7246 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7247 return PORT_TC_NONE;
7248
7249 if (INTEL_GEN(dev_priv) >= 12)
7250 return port - PORT_D;
7251
7252 return port - PORT_C;
7253 }
7254
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * warn (MISSING_CASE) and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
7277
/*
 * Return the AUX power domain for @dig_port. A Type-C port in TBT-alt
 * mode uses the TBT AUX domains; everything else uses the legacy mapping.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			/* Arbitrary fallback after the warning above. */
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
7305
7306 /*
7307 * Converts aux_ch to power_domain without caring about TBT ports for that use
7308 * intel_aux_power_domain()
7309 */
7310 enum intel_display_power_domain
7311 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
7312 {
7313 switch (aux_ch) {
7314 case AUX_CH_A:
7315 return POWER_DOMAIN_AUX_A;
7316 case AUX_CH_B:
7317 return POWER_DOMAIN_AUX_B;
7318 case AUX_CH_C:
7319 return POWER_DOMAIN_AUX_C;
7320 case AUX_CH_D:
7321 return POWER_DOMAIN_AUX_D;
7322 case AUX_CH_E:
7323 return POWER_DOMAIN_AUX_E;
7324 case AUX_CH_F:
7325 return POWER_DOMAIN_AUX_F;
7326 case AUX_CH_G:
7327 return POWER_DOMAIN_AUX_G;
7328 default:
7329 MISSING_CASE(aux_ch);
7330 return POWER_DOMAIN_AUX_A;
7331 }
7332 }
7333
7334 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7335 {
7336 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7337 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7338 struct drm_encoder *encoder;
7339 enum pipe pipe = crtc->pipe;
7340 u64 mask;
7341 enum transcoder transcoder = crtc_state->cpu_transcoder;
7342
7343 if (!crtc_state->hw.active)
7344 return 0;
7345
7346 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7347 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7348 if (crtc_state->pch_pfit.enabled ||
7349 crtc_state->pch_pfit.force_thru)
7350 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7351
7352 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7353 crtc_state->uapi.encoder_mask) {
7354 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7355
7356 mask |= BIT_ULL(intel_encoder->power_domain);
7357 }
7358
7359 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7360 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7361
7362 if (crtc_state->shared_dpll)
7363 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7364
7365 return mask;
7366 }
7367
/*
 * Update crtc->enabled_power_domains for the new state: grab references
 * on the newly needed domains and return the mask of domains that are no
 * longer needed, for the caller to release via modeset_put_power_domains().
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	old_domains = crtc->enabled_power_domains;
	crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc_state);

	/* Domains needed now but not before: take references. */
	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	/* Domains needed before but not now: caller drops these. */
	return old_domains & ~new_domains;
}
7387
7388 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7389 u64 domains)
7390 {
7391 enum intel_display_power_domain domain;
7392
7393 for_each_power_domain(domain, domains)
7394 intel_display_power_put_unchecked(dev_priv, domain);
7395 }
7396
/*
 * Full modeset enable sequence for VLV/CHV crtcs. Step order follows the
 * hardware enable sequence: timings/pipeconf, PLL, encoders, pfit, LUTs,
 * watermarks, then pipe on.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: use legacy blending and a black canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7451
/* Write the precomputed FP0/FP1 PLL divider values for this pipe. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, FP0(crtc->pipe),
		       crtc_state->dpll_hw_state.fp0);
	intel_de_write(dev_priv, FP1(crtc->pipe),
		       crtc_state->dpll_hw_state.fp1);
}
7462
/*
 * Full modeset enable sequence for pre-ILK GMCH crtcs. Step order
 * follows the hardware enable sequence and should not be reordered.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* Fall back to the legacy watermark path when no hook is set. */
	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7510
/* Disable the GMCH panel fitter (pipe must already be disabled). */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the old state didn't use the fitter. */
	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
7525
/*
 * Full modeset disable sequence for pre-ILK GMCH crtcs: encoders off,
 * vblank off, pipe off, pfit off, then the PLL.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI PLLs are handled by the DSI encoder itself. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7572
/*
 * Force a crtc off outside of a normal atomic commit (used when sanitizing
 * inherited hardware state). Disables planes and the crtc via the platform
 * hook, then clears all the software state tracking (uapi/hw state, power
 * domains, cdclk and bandwidth bookkeeping) to match.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off any plane that is still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* A throwaway atomic state just to drive the crtc_disable hook. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Clear the uapi/hw crtc state to reflect the disabled crtc. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Drop every power domain reference this crtc was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	/* Update cdclk and bandwidth bookkeeping for the now-dead pipe. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7658
7659 /*
7660 * turn all crtc's off, but do not adjust state
7661 * This has to be paired with a call to intel_modeset_setup_hw_state.
7662 */
7663 int intel_display_suspend(struct drm_device *dev)
7664 {
7665 struct drm_i915_private *dev_priv = to_i915(dev);
7666 struct drm_atomic_state *state;
7667 int ret;
7668
7669 state = drm_atomic_helper_suspend(dev);
7670 ret = PTR_ERR_OR_ZERO(state);
7671 if (ret)
7672 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7673 ret);
7674 else
7675 dev_priv->modeset_restore_state = state;
7676 return ret;
7677 }
7678
/* Tear down the drm core encoder state, then free the wrapping struct. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
7686
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* An enabled connector must have a crtc and an active one. */
		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoder/crtc links aren't checked here. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		/* A disabled connector must not have an active crtc/encoder. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}
7725
7726 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7727 {
7728 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7729 return crtc_state->fdi_lanes;
7730
7731 return 0;
7732 }
7733
/*
 * Validate the FDI lane count requested by @pipe_config against the
 * platform limits and against the lanes consumed by the other pipes
 * (pipes B and C share FDI lanes on IVB). Returns 0 on success, a
 * negative error code (-EINVAL, or -EDEADLK/-EINTR propagated from
 * intel_atomic_get_crtc_state()) on failure.
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are limited to 2 FDI lanes total. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe platforms have no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use >2 lanes only if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is capped at 2 lanes, and only if pipe B uses <=2. */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7811
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. If the
 * lane configuration is invalid, retry with a reduced pipe bpp (down to
 * 6 bpc) and return RETRY so the caller recomputes; -EDEADLK is
 * propagated, other errors are returned as-is.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* Invalid lane config: try again with 2 bits/component less. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7862
7863 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7864 {
7865 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7866 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7867
7868 /* IPS only exists on ULT machines and is tied to pipe A. */
7869 if (!hsw_crtc_supports_ips(crtc))
7870 return false;
7871
7872 if (!i915_modparams.enable_ips)
7873 return false;
7874
7875 if (crtc_state->pipe_bpp > 24)
7876 return false;
7877
7878 /*
7879 * We compare against max which means we must take
7880 * the increased cdclk requirement into account when
7881 * calculating the new cdclk.
7882 *
7883 * Should measure whether using a lower cdclk w/o IPS
7884 */
7885 if (IS_BROADWELL(dev_priv) &&
7886 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7887 return false;
7888
7889 return true;
7890 }
7891
/*
 * Decide whether IPS should be enabled for @crtc_state and record the
 * result in crtc_state->ips_enabled. Returns 0 on success or a negative
 * error code from intel_atomic_get_cdclk_state().
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
7933
7934 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7935 {
7936 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7937
7938 /* GDG double wide on either pipe, otherwise pipe A only */
7939 return INTEL_GEN(dev_priv) < 4 &&
7940 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7941 }
7942
7943 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
7944 {
7945 u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
7946 unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
7947
7948 /*
7949 * We only use IF-ID interlacing. If we ever use
7950 * PF-ID we'll need to adjust the pixel_rate here.
7951 */
7952
7953 if (!crtc_state->pch_pfit.enabled)
7954 return pixel_rate;
7955
7956 pipe_w = crtc_state->pipe_src_w;
7957 pipe_h = crtc_state->pipe_src_h;
7958
7959 pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
7960 pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
7961
7962 if (pipe_w < pfit_w)
7963 pipe_w = pfit_w;
7964 if (pipe_h < pfit_h)
7965 pipe_h = pfit_h;
7966
7967 if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
7968 !pfit_w || !pfit_h))
7969 return pixel_rate;
7970
7971 return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7972 pfit_w * pfit_h);
7973 }
7974
7975 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7976 {
7977 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7978
7979 if (HAS_GMCH(dev_priv))
7980 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7981 crtc_state->pixel_rate =
7982 crtc_state->hw.adjusted_mode.crtc_clock;
7983 else
7984 crtc_state->pixel_rate =
7985 ilk_pipe_pixel_rate(crtc_state);
7986 }
7987
/*
 * Validate and adjust basic pipe-level constraints for the new crtc state:
 * dotclock limits (including pre-gen4 double wide selection), YCbCr/CTM
 * exclusivity, odd source width restrictions, and the zero hsync front
 * porch workaround. Computes the final pipe pixel rate, and hands off to
 * the FDI code for PCH-attached outputs.
 *
 * Returns 0 on success, -EINVAL if the requested mode can't be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 single wide pipes top out at 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH-attached outputs additionally need a valid FDI config. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
8065
8066 static void
8067 intel_reduce_m_n_ratio(u32 *num, u32 *den)
8068 {
8069 while (*num > DATA_LINK_M_N_MASK ||
8070 *den > DATA_LINK_M_N_MASK) {
8071 *num >>= 1;
8072 *den >>= 1;
8073 }
8074 }
8075
8076 static void compute_m_n(unsigned int m, unsigned int n,
8077 u32 *ret_m, u32 *ret_n,
8078 bool constant_n)
8079 {
8080 /*
8081 * Several DP dongles in particular seem to be fussy about
8082 * too large link M/N values. Give N value as 0x8000 that
8083 * should be acceptable by specific devices. 0x8000 is the
8084 * specified fixed N value for asynchronous clock mode,
8085 * which the devices expect also in synchronous clock mode.
8086 */
8087 if (constant_n)
8088 *ret_n = 0x8000;
8089 else
8090 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8091
8092 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8093 intel_reduce_m_n_ratio(ret_m, ret_n);
8094 }
8095
8096 void
8097 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8098 int pixel_clock, int link_clock,
8099 struct intel_link_m_n *m_n,
8100 bool constant_n, bool fec_enable)
8101 {
8102 u32 data_clock = bits_per_pixel * pixel_clock;
8103
8104 if (fec_enable)
8105 data_clock = intel_dp_mode_to_fec_clock(data_clock);
8106
8107 m_n->tu = 64;
8108 compute_m_n(data_clock,
8109 link_clock * nlanes * 8,
8110 &m_n->gmch_m, &m_n->gmch_n,
8111 constant_n);
8112
8113 compute_m_n(pixel_clock, link_clock,
8114 &m_n->link_m, &m_n->link_n,
8115 constant_n);
8116 }
8117
8118 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
8119 {
8120 /*
8121 * There may be no VBT; and if the BIOS enabled SSC we can
8122 * just keep using it to avoid unnecessary flicker. Whereas if the
8123 * BIOS isn't using it, don't assume it will work even if the VBT
8124 * indicates as much.
8125 */
8126 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
8127 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
8128 PCH_DREF_CONTROL) &
8129 DREF_SSC1_ENABLE;
8130
8131 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
8132 drm_dbg_kms(&dev_priv->drm,
8133 "SSC %s by BIOS, overriding VBT which says %s\n",
8134 enableddisabled(bios_lvds_use_ssc),
8135 enableddisabled(dev_priv->vbt.lvds_use_ssc));
8136 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
8137 }
8138 }
8139 }
8140
8141 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8142 {
8143 if (i915_modparams.panel_use_ssc >= 0)
8144 return i915_modparams.panel_use_ssc != 0;
8145 return dev_priv->vbt.lvds_use_ssc
8146 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8147 }
8148
8149 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8150 {
8151 return (1 << dpll->n) << 16 | dpll->m2;
8152 }
8153
8154 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8155 {
8156 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8157 }
8158
8159 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8160 struct intel_crtc_state *crtc_state,
8161 struct dpll *reduced_clock)
8162 {
8163 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8164 u32 fp, fp2 = 0;
8165
8166 if (IS_PINEVIEW(dev_priv)) {
8167 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8168 if (reduced_clock)
8169 fp2 = pnv_dpll_compute_fp(reduced_clock);
8170 } else {
8171 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8172 if (reduced_clock)
8173 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8174 }
8175
8176 crtc_state->dpll_hw_state.fp0 = fp;
8177
8178 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8179 reduced_clock) {
8180 crtc_state->dpll_hw_state.fp1 = fp2;
8181 } else {
8182 crtc_state->dpll_hw_state.fp1 = fp;
8183 }
8184 }
8185
/*
 * Work around the PLL B opamp always calibrating to its max value (0x3f):
 * force-enable the opamp, program a sane value, then re-run the sequence
 * with the force bits cleared. The magic constants come from the VLV DPIO
 * programming notes; the write ordering is significant.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte again once the calibration value has latched. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8214
/*
 * Program the PCH transcoder data/link M/N registers for this crtc's pipe.
 * The TU size is packed into the high bits of the data M register.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8228
8229 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8230 enum transcoder transcoder)
8231 {
8232 if (IS_HASWELL(dev_priv))
8233 return transcoder == TRANSCODER_EDP;
8234
8235 /*
8236 * Strictly speaking some registers are available before
8237 * gen7, but we only support DRRS on gen7+
8238 */
8239 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8240 }
8241
/*
 * Program the CPU transcoder data/link M/N values. On gen5+ these live in
 * per-transcoder registers (optionally including the second M2/N2 set for
 * DRRS); on g4x and earlier they are per-pipe registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
8283
/*
 * Program the selected set of DP M/N dividers (M1_N1 or M2_N2) into the
 * transcoder. On hardware without real M2/N2 registers the m2_n2 values
 * are programmed into the M1/N1 registers instead.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n, not the
	 * dp_m_n pointer selected above — presumably DRRS (M2_N2) never
	 * applies to PCH encoders, but verify against the callers.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
8309
8310 static void vlv_compute_dpll(struct intel_crtc *crtc,
8311 struct intel_crtc_state *pipe_config)
8312 {
8313 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8314 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8315 if (crtc->pipe != PIPE_A)
8316 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8317
8318 /* DPLL not used with DSI, but still need the rest set up */
8319 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8320 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8321 DPLL_EXT_BUFFER_ENABLE_VLV;
8322
8323 pipe_config->dpll_hw_state.dpll_md =
8324 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8325 }
8326
8327 static void chv_compute_dpll(struct intel_crtc *crtc,
8328 struct intel_crtc_state *pipe_config)
8329 {
8330 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8331 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8332 if (crtc->pipe != PIPE_A)
8333 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8334
8335 /* DPLL not used with DSI, but still need the rest set up */
8336 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8337 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8338
8339 pipe_config->dpll_hw_state.dpll_md =
8340 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8341 }
8342
/*
 * Program the VLV PLL dividers and analog tuning values through the DPIO
 * sideband before the PLL gets enabled. The magic register values and
 * the write ordering follow the eDP/HDMI DPIO vbios notes; do not
 * reorder the sequence lightly.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers a second time with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
	}

	/* Core clock tuning; extra bit set for DP encoders. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8441
/*
 * Program the CHV PLL dividers, loop filter and lock-detect settings via
 * the DPIO sideband before the PLL gets enabled. The loop filter
 * coefficients are selected by the target VCO frequency; the register
 * values and ordering follow the CHV DPIO programming notes.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* M2 is split into a 22-bit fractional part and an integer part. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count, paired with the loop filter selection above. */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8546
8547 /**
8548 * vlv_force_pll_on - forcibly enable just the PLL
8549 * @dev_priv: i915 private structure
8550 * @pipe: pipe PLL to enable
8551 * @dpll: PLL configuration
8552 *
8553 * Enable the PLL for @pipe using the supplied @dpll config. To be used
8554 * in cases where we need the PLL enabled even when @pipe is not going to
8555 * be enabled.
8556 */
8557 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8558 const struct dpll *dpll)
8559 {
8560 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8561 struct intel_crtc_state *pipe_config;
8562
8563 pipe_config = intel_crtc_state_alloc(crtc);
8564 if (!pipe_config)
8565 return -ENOMEM;
8566
8567 pipe_config->cpu_transcoder = (enum transcoder)pipe;
8568 pipe_config->pixel_multiplier = 1;
8569 pipe_config->dpll = *dpll;
8570
8571 if (IS_CHERRYVIEW(dev_priv)) {
8572 chv_compute_dpll(crtc, pipe_config);
8573 chv_prepare_pll(crtc, pipe_config);
8574 chv_enable_pll(crtc, pipe_config);
8575 } else {
8576 vlv_compute_dpll(crtc, pipe_config);
8577 vlv_prepare_pll(crtc, pipe_config);
8578 vlv_enable_pll(crtc, pipe_config);
8579 }
8580
8581 kfree(pipe_config);
8582
8583 return 0;
8584 }
8585
8586 /**
8587 * vlv_force_pll_off - forcibly disable just the PLL
8588 * @dev_priv: i915 private structure
8589 * @pipe: pipe PLL to disable
8590 *
8591 * Disable the PLL for @pipe. To be used in cases where we need
8592 * the PLL enabled even when @pipe is not going to be enabled.
8593 */
8594 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8595 {
8596 if (IS_CHERRYVIEW(dev_priv))
8597 chv_disable_pll(dev_priv, pipe);
8598 else
8599 vlv_disable_pll(dev_priv, pipe);
8600 }
8601
/*
 * Compute the gen3/gen4 (i9xx family) DPLL control register value (and
 * DPLL_MD on gen4+) from the already-computed clock dividers, and store
 * it in crtc_state->dpll_hw_state. @reduced_clock, when non-NULL, is the
 * downclocked divider set used for LVDS downclocking.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* p2 is encoded as one of four fixed divisors */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC, or default refclk. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carries the pixel multiplier in DPLL_MD instead. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8674
/*
 * Compute the gen2 (i8xx family) DPLL control register value from the
 * already-computed clock dividers and store it in
 * crtc_state->dpll_hw_state. @reduced_clock, when non-NULL, is the
 * downclocked divider set used for LVDS downclocking.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS and non-LVDS use different P1/P2 encodings on gen2. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to "1" in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Reference clock: SSC for LVDS panels (when allowed), else refclk. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8724
/*
 * Program the transcoder H/V timing registers from the adjusted mode.
 * All register fields are (value - 1) encoded; interlaced modes need the
 * vtotal/vblank_end adjusted (the hardware adds the two halflines itself)
 * and a suitable VSYNCSHIFT.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Horizontal timings: low 16 bits active/start, high 16 bits total/end. */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	/* Vertical timings, using the interlace-adjusted values from above. */
	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8782
8783 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8784 {
8785 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8786 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8787 enum pipe pipe = crtc->pipe;
8788
8789 /* pipesrc controls the size that is scaled from, which should
8790 * always be the user's requested size.
8791 */
8792 intel_de_write(dev_priv, PIPESRC(pipe),
8793 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8794 }
8795
8796 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8797 {
8798 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8799 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8800
8801 if (IS_GEN(dev_priv, 2))
8802 return false;
8803
8804 if (INTEL_GEN(dev_priv) >= 9 ||
8805 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8806 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8807 else
8808 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8809 }
8810
/*
 * Read the transcoder timing registers back into the adjusted mode.
 * All fields are (value - 1) encoded in hardware, hence the +1 on decode.
 * DSI transcoders have no blank registers; interlaced modes get the
 * vtotal/vblank_end halfline adjustment undone.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* Undo the halfline adjustment made when programming. */
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
8855
8856 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8857 struct intel_crtc_state *pipe_config)
8858 {
8859 struct drm_device *dev = crtc->base.dev;
8860 struct drm_i915_private *dev_priv = to_i915(dev);
8861 u32 tmp;
8862
8863 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8864 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8865 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8866
8867 pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8868 pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8869 }
8870
8871 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8872 struct intel_crtc_state *pipe_config)
8873 {
8874 mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8875 mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8876 mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8877 mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8878
8879 mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8880 mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8881 mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8882 mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8883
8884 mode->flags = pipe_config->hw.adjusted_mode.flags;
8885 mode->type = DRM_MODE_TYPE_DRIVER;
8886
8887 mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8888
8889 drm_mode_set_name(mode);
8890 }
8891
/*
 * Program PIPECONF for a gmch-era (i9xx/g4x/VLV/CHV) pipe from the
 * software crtc state: double-wide, bpc/dither, interlace mode, color
 * range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				PIPECONF_DITHER_TYPE_SP;

		/* pipe_bpp is total bits per pixel, i.e. 3 * bpc */
		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3- and SDVO outputs need the field indication variant */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited (16-235) color range is selectable only on VLV/CHV here */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	/* posting read to flush the write out */
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8952
/*
 * Compute DPLL settings for an i8xx pipe: choose the PLL limits matching
 * the attached output type, find divider values for the target port
 * clock (unless userspace pre-set them via clock_set) and fill in
 * dpll_hw_state. Returns 0 on success, -EINVAL if no PLL settings fit.
 */
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 48000;	/* default reference clock in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS panels may use the VBT-specified SSC reference instead */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Translate the divider values into register contents */
	i8xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
8991
/*
 * Compute DPLL settings for a g4x pipe. Selects output-specific PLL
 * limits (incl. single vs. dual link LVDS), finds dividers for the
 * target port clock unless userspace pre-set them, and fills in
 * dpll_hw_state. Returns 0 on success, -EINVAL if no PLL settings fit.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_limit *limit;
	int refclk = 96000;	/* default reference clock in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS panels may use the VBT-specified SSC reference instead */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		if (intel_is_dual_link_lvds(dev_priv))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Translate the divider values into register contents */
	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
9036
/*
 * Compute DPLL settings for a Pineview pipe. Same flow as the other
 * gmch clock helpers but with Pineview-specific PLL limits and divider
 * search. Returns 0 on success, -EINVAL if no PLL settings fit.
 */
static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;	/* default reference clock in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS panels may use the VBT-specified SSC reference instead */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &pnv_limits_lvds;
	} else {
		limit = &pnv_limits_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Translate the divider values into register contents */
	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
9073
/*
 * Compute DPLL settings for an i9xx pipe: pick LVDS or SDVO PLL limits,
 * find dividers for the target port clock unless userspace pre-set
 * them, and fill in dpll_hw_state. Returns 0 on success, -EINVAL if no
 * PLL settings fit.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;	/* default reference clock in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS panels may use the VBT-specified SSC reference instead */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Translate the divider values into register contents */
	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
9110
9111 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9112 struct intel_crtc_state *crtc_state)
9113 {
9114 int refclk = 100000;
9115 const struct intel_limit *limit = &intel_limits_chv;
9116 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9117
9118 memset(&crtc_state->dpll_hw_state, 0,
9119 sizeof(crtc_state->dpll_hw_state));
9120
9121 if (!crtc_state->clock_set &&
9122 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9123 refclk, NULL, &crtc_state->dpll)) {
9124 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9125 return -EINVAL;
9126 }
9127
9128 chv_compute_dpll(crtc, crtc_state);
9129
9130 return 0;
9131 }
9132
9133 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9134 struct intel_crtc_state *crtc_state)
9135 {
9136 int refclk = 100000;
9137 const struct intel_limit *limit = &intel_limits_vlv;
9138 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9139
9140 memset(&crtc_state->dpll_hw_state, 0,
9141 sizeof(crtc_state->dpll_hw_state));
9142
9143 if (!crtc_state->clock_set &&
9144 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9145 refclk, NULL, &crtc_state->dpll)) {
9146 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9147 return -EINVAL;
9148 }
9149
9150 vlv_compute_dpll(crtc, crtc_state);
9151
9152 return 0;
9153 }
9154
9155 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9156 {
9157 if (IS_I830(dev_priv))
9158 return false;
9159
9160 return INTEL_GEN(dev_priv) >= 4 ||
9161 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9162 }
9163
/*
 * Read back the gmch panel fitter state into the crtc state, but only
 * if a pfit exists, is enabled, and is attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* pre-gen4: the pfit is tied to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* gen4+: PFIT_CONTROL encodes the attached pipe */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
9190
/*
 * Read back the VLV DPLL dividers over DPIO sideband and compute the
 * resulting port clock into pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband access must be bracketed by get/put */
	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the m/n/p divider fields from the DW3 dword */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9217
/*
 * Read back the primary plane's framebuffer configuration (as left by
 * the BIOS/GOP) so the boot framebuffer can be inherited. On failure to
 * allocate, or if the plane is disabled, plane_config->fb stays NULL.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is off */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	/* Tiling and rotation are only readable on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* Surface base address and offset live in per-platform registers */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = intel_de_read(dev_priv,
					       DSPTILEOFF(i9xx_plane));
		else
			offset = intel_de_read(dev_priv,
					       DSPLINOFF(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width - 1) / (height - 1) */
	val = intel_de_read(dev_priv, PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	/* Ownership of intel_fb passes to the caller via plane_config */
	plane_config->fb = intel_fb;
}
9303
/*
 * Read back the CHV DPLL dividers over DPIO sideband and compute the
 * resulting port clock into pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband access must be bracketed by get/put */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 has a 22-bit fractional part, enabled by FRAC_DIV_EN */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9337
9338 static enum intel_output_format
9339 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9340 {
9341 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9342 u32 tmp;
9343
9344 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9345
9346 if (tmp & PIPEMISC_YUV420_ENABLE) {
9347 /* We support 4:2:0 in full blend mode only */
9348 drm_WARN_ON(&dev_priv->drm,
9349 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9350
9351 return INTEL_OUTPUT_FORMAT_YCBCR420;
9352 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9353 return INTEL_OUTPUT_FORMAT_YCBCR444;
9354 } else {
9355 return INTEL_OUTPUT_FORMAT_RGB;
9356 }
9357 }
9358
9359 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9360 {
9361 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9362 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9363 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9364 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9365 u32 tmp;
9366
9367 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9368
9369 if (tmp & DISPPLANE_GAMMA_ENABLE)
9370 crtc_state->gamma_enable = true;
9371
9372 if (!HAS_GMCH(dev_priv) &&
9373 tmp & DISPPLANE_PIPE_CSC_ENABLE)
9374 crtc_state->csc_enable = true;
9375 }
9376
/*
 * Read the full hardware state of a gmch-era pipe into pipe_config.
 * Returns true if the pipe is powered and enabled (state captured),
 * false otherwise. Takes a power domain reference for the duration of
 * the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail if the pipe's power well is down - registers aren't readable */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* On gmch platforms the transcoder is always the pipe itself */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* bpc readout only exists on g4x and later */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Pixel multiplier readout is per-platform */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Compute port_clock from the DPLL dividers just read out */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9498
/*
 * Configure the Ironlake PCH display reference clock (PCH_DREF_CONTROL)
 * to match the connected outputs: nonspread vs. CK505 source, SSC for
 * LVDS/eDP panels, and the CPU eDP source output. The register is
 * reprogrammed one source at a time with settle delays; the desired
 * final value is computed up front so the function can return early
 * when no change is needed.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* IBX: external CK505 clock per VBT; later PCH: SSC always usable */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but a DPLL still relies on SSC - keep it running */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);	/* let the clock source settle */

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);	/* let the clock source settle */
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);	/* let the clock source settle */

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);	/* let the clock source settle */
		}
	}

	/* The incremental programming above must converge on 'final' */
	BUG_ON(val != final);
}
9667
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait for the status bit to latch, then de-assert and wait for it
 * to clear. Timeouts (100 us each) are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9688
9689 /* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband interface
 * (SBI, SBI_MPHY destination). The addresses and values are opaque
 * magic numbers taken from the WaMPhyProgramming:hsw workaround; each
 * pair of writes (0x2xxx / 0x21xx) programs the two FDI lanes
 * identically.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9763
9764 /* Implements 3 different sequences from BSpec chapter "Display iCLK
9765 * Programming" based on the parameters passed:
9766 * - Sequence to enable CLKOUT_DP
9767 * - Sequence to enable CLKOUT_DP without spread
9768 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
9769 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations (see drm_WARN text) */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All sideband (SBI) traffic must hold sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the bend/spread path alternate */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);	/* settle time between sequence steps */

	if (with_spread) {
		/* Switch off the alternate path to get actual downspread */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Enable the CLKOUT_DP buffer (register differs on LP PCH) */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9809
9810 /* Sequence to disable CLKOUT_DP */
/*
 * Disable CLKOUT_DP: turn off the clock buffer, then (if the SSC block
 * is still running) select the alternate path and finally disable the
 * SSC block. All steps go over sideband under sb_lock.
 */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clock buffer register differs on LP PCH */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Select the alternate path before shutting the block down */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);	/* settle time between sequence steps */
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9835
/* Map a bend step count (-50..+50 in steps of 5) to a table index 0..20 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE values for CLKOUT_DP bending, indexed by
 * BEND_IDX(steps). Adjacent odd multiples of 5 share a value with the
 * neighbouring multiple of 10; the intermediate points are apparently
 * realized via the dither phase programmed in lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9861
9862 /*
9863 * Bend CLKOUT_DP
9864 * steps -50 to 50 inclusive, in steps of 5
9865 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9866 * change in clock period = -(steps / 10) * 5.787 ps
9867 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 within the table range are valid */
	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	/* All sideband (SBI) traffic must hold sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 need the dither phase pattern enabled */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Program the low word of SSCDIVINTPHASE from the lookup table */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9894
9895 #undef BEND_IDX
9896
9897 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9898 {
9899 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9900 u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9901
9902 if ((ctl & SPLL_PLL_ENABLE) == 0)
9903 return false;
9904
9905 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9906 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9907 return true;
9908
9909 if (IS_BROADWELL(dev_priv) &&
9910 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9911 return true;
9912
9913 return false;
9914 }
9915
9916 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9917 enum intel_dpll_id id)
9918 {
9919 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9920 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
9921
9922 if ((ctl & WRPLL_PLL_ENABLE) == 0)
9923 return false;
9924
9925 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9926 return true;
9927
9928 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9929 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9930 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9931 return true;
9932
9933 return false;
9934 }
9935
9936 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9937 {
9938 struct intel_encoder *encoder;
9939 bool has_fdi = false;
9940
9941 for_each_intel_encoder(&dev_priv->drm, encoder) {
9942 switch (encoder->type) {
9943 case INTEL_OUTPUT_ANALOG:
9944 has_fdi = true;
9945 break;
9946 default:
9947 break;
9948 }
9949 }
9950
9951 /*
9952 * The BIOS may have decided to use the PCH SSC
9953 * reference so we must not disable it until the
9954 * relevant PLLs have stopped relying on it. We'll
9955 * just leave the PCH SSC reference enabled in case
9956 * any active PLL is using it. It will get disabled
9957 * after runtime suspend if we don't have FDI.
9958 *
9959 * TODO: Move the whole reference clock handling
9960 * to the modeset sequence proper so that we can
9961 * actually enable/disable/reconfigure these things
9962 * safely. To do that we need to introduce a real
9963 * clock hierarchy. That would also allow us to do
9964 * clock bending finally.
9965 */
9966 dev_priv->pch_ssc_use = 0;
9967
9968 if (spll_uses_pch_ssc(dev_priv)) {
9969 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
9970 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9971 }
9972
9973 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9974 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
9975 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9976 }
9977
9978 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9979 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
9980 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9981 }
9982
9983 if (dev_priv->pch_ssc_use)
9984 return;
9985
9986 if (has_fdi) {
9987 lpt_bend_clkout_dp(dev_priv, 0);
9988 lpt_enable_clkout_dp(dev_priv, true, true);
9989 } else {
9990 lpt_disable_clkout_dp(dev_priv);
9991 }
9992 }
9993
/*
 * Initialize reference clocks when the driver loads.
 * IBX/CPT PCHs go through ilk_init_pch_refclk(), LPT through
 * lpt_init_pch_refclk(); other PCH types need no setup here.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
10004
10005 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
10006 {
10007 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10008 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10009 enum pipe pipe = crtc->pipe;
10010 u32 val;
10011
10012 val = 0;
10013
10014 switch (crtc_state->pipe_bpp) {
10015 case 18:
10016 val |= PIPECONF_6BPC;
10017 break;
10018 case 24:
10019 val |= PIPECONF_8BPC;
10020 break;
10021 case 30:
10022 val |= PIPECONF_10BPC;
10023 break;
10024 case 36:
10025 val |= PIPECONF_12BPC;
10026 break;
10027 default:
10028 /* Case prevented by intel_choose_pipe_bpp_dither. */
10029 BUG();
10030 }
10031
10032 if (crtc_state->dither)
10033 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10034
10035 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10036 val |= PIPECONF_INTERLACED_ILK;
10037 else
10038 val |= PIPECONF_PROGRESSIVE;
10039
10040 /*
10041 * This would end up with an odd purple hue over
10042 * the entire display. Make sure we don't do it.
10043 */
10044 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
10045 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
10046
10047 if (crtc_state->limited_color_range)
10048 val |= PIPECONF_COLOR_RANGE_SELECT;
10049
10050 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10051 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
10052
10053 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
10054
10055 val |= PIPECONF_FRAME_START_DELAY(0);
10056
10057 intel_de_write(dev_priv, PIPECONF(pipe), val);
10058 intel_de_posting_read(dev_priv, PIPECONF(pipe));
10059 }
10060
10061 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
10062 {
10063 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10064 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10065 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10066 u32 val = 0;
10067
10068 if (IS_HASWELL(dev_priv) && crtc_state->dither)
10069 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10070
10071 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10072 val |= PIPECONF_INTERLACED_ILK;
10073 else
10074 val |= PIPECONF_PROGRESSIVE;
10075
10076 if (IS_HASWELL(dev_priv) &&
10077 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10078 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
10079
10080 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
10081 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
10082 }
10083
10084 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
10085 {
10086 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10087 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10088 u32 val = 0;
10089
10090 switch (crtc_state->pipe_bpp) {
10091 case 18:
10092 val |= PIPEMISC_DITHER_6_BPC;
10093 break;
10094 case 24:
10095 val |= PIPEMISC_DITHER_8_BPC;
10096 break;
10097 case 30:
10098 val |= PIPEMISC_DITHER_10_BPC;
10099 break;
10100 case 36:
10101 val |= PIPEMISC_DITHER_12_BPC;
10102 break;
10103 default:
10104 MISSING_CASE(crtc_state->pipe_bpp);
10105 break;
10106 }
10107
10108 if (crtc_state->dither)
10109 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
10110
10111 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
10112 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
10113 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
10114
10115 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
10116 val |= PIPEMISC_YUV420_ENABLE |
10117 PIPEMISC_YUV420_MODE_FULL_BLEND;
10118
10119 if (INTEL_GEN(dev_priv) >= 11 &&
10120 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
10121 BIT(PLANE_CURSOR))) == 0)
10122 val |= PIPEMISC_HDR_MODE_PRECISION;
10123
10124 if (INTEL_GEN(dev_priv) >= 12)
10125 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
10126
10127 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
10128 }
10129
10130 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
10131 {
10132 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10133 u32 tmp;
10134
10135 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
10136
10137 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
10138 case PIPEMISC_DITHER_6_BPC:
10139 return 18;
10140 case PIPEMISC_DITHER_8_BPC:
10141 return 24;
10142 case PIPEMISC_DITHER_10_BPC:
10143 return 30;
10144 case PIPEMISC_DITHER_12_BPC:
10145 return 36;
10146 default:
10147 MISSING_CASE(tmp);
10148 return 0;
10149 }
10150 }
10151
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int link_bytes = link_bw * 8;

	/* Round up: a partially used lane still needs a whole lane. */
	return (bps + link_bytes - 1) / link_bytes;
}
10162
10163 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
10164 {
10165 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
10166 }
10167
/*
 * Compute the ILK DPLL, FP0 and FP1 register values for the given crtc
 * state (and optional reduced clock) and store them in
 * crtc_state->dpll_hw_state.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 mirrors FP0 unless a separate reduced clock was requested. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels on SSC use the spread spectrum reference input. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10269
10270 static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
10271 struct intel_crtc_state *crtc_state)
10272 {
10273 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10274 struct intel_atomic_state *state =
10275 to_intel_atomic_state(crtc_state->uapi.state);
10276 const struct intel_limit *limit;
10277 int refclk = 120000;
10278
10279 memset(&crtc_state->dpll_hw_state, 0,
10280 sizeof(crtc_state->dpll_hw_state));
10281
10282 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
10283 if (!crtc_state->has_pch_encoder)
10284 return 0;
10285
10286 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
10287 if (intel_panel_use_ssc(dev_priv)) {
10288 drm_dbg_kms(&dev_priv->drm,
10289 "using SSC reference clock of %d kHz\n",
10290 dev_priv->vbt.lvds_ssc_freq);
10291 refclk = dev_priv->vbt.lvds_ssc_freq;
10292 }
10293
10294 if (intel_is_dual_link_lvds(dev_priv)) {
10295 if (refclk == 100000)
10296 limit = &ilk_limits_dual_lvds_100m;
10297 else
10298 limit = &ilk_limits_dual_lvds;
10299 } else {
10300 if (refclk == 100000)
10301 limit = &ilk_limits_single_lvds_100m;
10302 else
10303 limit = &ilk_limits_single_lvds;
10304 }
10305 } else {
10306 limit = &ilk_limits_dac;
10307 }
10308
10309 if (!crtc_state->clock_set &&
10310 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
10311 refclk, NULL, &crtc_state->dpll)) {
10312 drm_err(&dev_priv->drm,
10313 "Couldn't find PLL settings for mode!\n");
10314 return -EINVAL;
10315 }
10316
10317 ilk_compute_dpll(crtc, crtc_state, NULL);
10318
10319 if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
10320 drm_dbg_kms(&dev_priv->drm,
10321 "failed to find PLL for pipe %c\n",
10322 pipe_name(crtc->pipe));
10323 return -EINVAL;
10324 }
10325
10326 return 0;
10327 }
10328
10329 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
10330 struct intel_link_m_n *m_n)
10331 {
10332 struct drm_device *dev = crtc->base.dev;
10333 struct drm_i915_private *dev_priv = to_i915(dev);
10334 enum pipe pipe = crtc->pipe;
10335
10336 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
10337 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
10338 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10339 & ~TU_SIZE_MASK;
10340 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
10341 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10342 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10343 }
10344
10345 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
10346 enum transcoder transcoder,
10347 struct intel_link_m_n *m_n,
10348 struct intel_link_m_n *m2_n2)
10349 {
10350 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10351 enum pipe pipe = crtc->pipe;
10352
10353 if (INTEL_GEN(dev_priv) >= 5) {
10354 m_n->link_m = intel_de_read(dev_priv,
10355 PIPE_LINK_M1(transcoder));
10356 m_n->link_n = intel_de_read(dev_priv,
10357 PIPE_LINK_N1(transcoder));
10358 m_n->gmch_m = intel_de_read(dev_priv,
10359 PIPE_DATA_M1(transcoder))
10360 & ~TU_SIZE_MASK;
10361 m_n->gmch_n = intel_de_read(dev_priv,
10362 PIPE_DATA_N1(transcoder));
10363 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
10364 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10365
10366 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
10367 m2_n2->link_m = intel_de_read(dev_priv,
10368 PIPE_LINK_M2(transcoder));
10369 m2_n2->link_n = intel_de_read(dev_priv,
10370 PIPE_LINK_N2(transcoder));
10371 m2_n2->gmch_m = intel_de_read(dev_priv,
10372 PIPE_DATA_M2(transcoder))
10373 & ~TU_SIZE_MASK;
10374 m2_n2->gmch_n = intel_de_read(dev_priv,
10375 PIPE_DATA_N2(transcoder));
10376 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
10377 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10378 }
10379 } else {
10380 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
10381 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
10382 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
10383 & ~TU_SIZE_MASK;
10384 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
10385 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
10386 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10387 }
10388 }
10389
10390 void intel_dp_get_m_n(struct intel_crtc *crtc,
10391 struct intel_crtc_state *pipe_config)
10392 {
10393 if (pipe_config->has_pch_encoder)
10394 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10395 else
10396 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10397 &pipe_config->dp_m_n,
10398 &pipe_config->dp_m2_n2);
10399 }
10400
/* Read out the FDI link M/N values (no second M2/N2 set is read). */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10407
10408 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
10409 u32 pos, u32 size)
10410 {
10411 drm_rect_init(&crtc_state->pch_pfit.dst,
10412 pos >> 16, pos & 0xffff,
10413 size >> 16, size & 0xffff);
10414 }
10415
10416 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
10417 {
10418 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10419 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10420 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
10421 int id = -1;
10422 int i;
10423
10424 /* find scaler attached to this pipe */
10425 for (i = 0; i < crtc->num_scalers; i++) {
10426 u32 ctl, pos, size;
10427
10428 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
10429 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
10430 continue;
10431
10432 id = i;
10433 crtc_state->pch_pfit.enabled = true;
10434
10435 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
10436 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
10437
10438 ilk_get_pfit_pos_size(crtc_state, pos, size);
10439
10440 scaler_state->scalers[i].in_use = true;
10441 break;
10442 }
10443
10444 scaler_state->scaler_id = id;
10445 if (id >= 0)
10446 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10447 else
10448 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
10449 }
10450
/*
 * Read out the primary plane's hardware state at driver load and
 * reconstruct a framebuffer description from it (BIOS framebuffer
 * takeover). On any failure the partially built intel_fb is freed
 * and plane_config->fb is left unset.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* ICL+ uses a wider pixel format field in PLANE_CTL */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK and gen10+ keep the alpha mode in PLANE_COLOR_CTL instead */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* map the hw tiling + compression bits to a framebuffer modifier */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* surface base is page aligned; the low bits carry no address */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height minus one */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* the hw stride register counts in tile-dependent units */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10583
10584 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
10585 {
10586 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10587 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10588 u32 ctl, pos, size;
10589
10590 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
10591 if ((ctl & PF_ENABLE) == 0)
10592 return;
10593
10594 crtc_state->pch_pfit.enabled = true;
10595
10596 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
10597 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
10598
10599 ilk_get_pfit_pos_size(crtc_state, pos, size);
10600
10601 /*
10602 * We currently do not free assignements of panel fitters on
10603 * ivb/hsw (since we don't use the higher upscaling modes which
10604 * differentiates them) so just WARN about this case for now.
10605 */
10606 drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
10607 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
10608 }
10609
10610 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10611 struct intel_crtc_state *pipe_config)
10612 {
10613 struct drm_device *dev = crtc->base.dev;
10614 struct drm_i915_private *dev_priv = to_i915(dev);
10615 enum intel_display_power_domain power_domain;
10616 intel_wakeref_t wakeref;
10617 u32 tmp;
10618 bool ret;
10619
10620 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10621 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10622 if (!wakeref)
10623 return false;
10624
10625 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10626 pipe_config->shared_dpll = NULL;
10627
10628 ret = false;
10629 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10630 if (!(tmp & PIPECONF_ENABLE))
10631 goto out;
10632
10633 switch (tmp & PIPECONF_BPC_MASK) {
10634 case PIPECONF_6BPC:
10635 pipe_config->pipe_bpp = 18;
10636 break;
10637 case PIPECONF_8BPC:
10638 pipe_config->pipe_bpp = 24;
10639 break;
10640 case PIPECONF_10BPC:
10641 pipe_config->pipe_bpp = 30;
10642 break;
10643 case PIPECONF_12BPC:
10644 pipe_config->pipe_bpp = 36;
10645 break;
10646 default:
10647 break;
10648 }
10649
10650 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10651 pipe_config->limited_color_range = true;
10652
10653 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10654 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10655 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10656 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10657 break;
10658 default:
10659 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10660 break;
10661 }
10662
10663 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10664 PIPECONF_GAMMA_MODE_SHIFT;
10665
10666 pipe_config->csc_mode = intel_de_read(dev_priv,
10667 PIPE_CSC_MODE(crtc->pipe));
10668
10669 i9xx_get_pipe_color_config(pipe_config);
10670 intel_color_get_config(pipe_config);
10671
10672 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10673 struct intel_shared_dpll *pll;
10674 enum intel_dpll_id pll_id;
10675
10676 pipe_config->has_pch_encoder = true;
10677
10678 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10679 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10680 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10681
10682 ilk_get_fdi_m_n_config(crtc, pipe_config);
10683
10684 if (HAS_PCH_IBX(dev_priv)) {
10685 /*
10686 * The pipe->pch transcoder and pch transcoder->pll
10687 * mapping is fixed.
10688 */
10689 pll_id = (enum intel_dpll_id) crtc->pipe;
10690 } else {
10691 tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10692 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10693 pll_id = DPLL_ID_PCH_PLL_B;
10694 else
10695 pll_id= DPLL_ID_PCH_PLL_A;
10696 }
10697
10698 pipe_config->shared_dpll =
10699 intel_get_shared_dpll_by_id(dev_priv, pll_id);
10700 pll = pipe_config->shared_dpll;
10701
10702 drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
10703 &pipe_config->dpll_hw_state));
10704
10705 tmp = pipe_config->dpll_hw_state.dpll;
10706 pipe_config->pixel_multiplier =
10707 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10708 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10709
10710 ilk_pch_clock_get(crtc, pipe_config);
10711 } else {
10712 pipe_config->pixel_multiplier = 1;
10713 }
10714
10715 intel_get_pipe_timings(crtc, pipe_config);
10716 intel_get_pipe_src_size(crtc, pipe_config);
10717
10718 ilk_get_pfit_config(pipe_config);
10719
10720 ret = true;
10721
10722 out:
10723 intel_display_power_put(dev_priv, power_domain, wakeref);
10724
10725 return ret;
10726 }
10727
10728 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10729 struct intel_crtc_state *crtc_state)
10730 {
10731 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10732 struct intel_atomic_state *state =
10733 to_intel_atomic_state(crtc_state->uapi.state);
10734
10735 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10736 INTEL_GEN(dev_priv) >= 11) {
10737 struct intel_encoder *encoder =
10738 intel_get_crtc_new_encoder(state, crtc_state);
10739
10740 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10741 drm_dbg_kms(&dev_priv->drm,
10742 "failed to find PLL for pipe %c\n",
10743 pipe_name(crtc->pipe));
10744 return -EINVAL;
10745 }
10746 }
10747
10748 return 0;
10749 }
10750
10751 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10752 struct intel_crtc_state *pipe_config)
10753 {
10754 enum intel_dpll_id id;
10755 u32 temp;
10756
10757 temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10758 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10759
10760 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
10761 return;
10762
10763 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10764 }
10765
/*
 * Read out which PLL drives the given DDI port on ICL+ and record it in
 * pipe_config's icl_port_dplls, marking it as the active port DPLL.
 * Combo PHY ports use the DPCLKA selection; Type-C ports are either on
 * the MG PHY PLL or the TBT PLL depending on DDI_CLK_SEL.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		/* combo PHY: the clock select field encodes the DPLL id */
		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* anything else must be one of the TBT selections */
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10802
10803 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10804 enum port port,
10805 struct intel_crtc_state *pipe_config)
10806 {
10807 enum intel_dpll_id id;
10808
10809 switch (port) {
10810 case PORT_A:
10811 id = DPLL_ID_SKL_DPLL0;
10812 break;
10813 case PORT_B:
10814 id = DPLL_ID_SKL_DPLL1;
10815 break;
10816 case PORT_C:
10817 id = DPLL_ID_SKL_DPLL2;
10818 break;
10819 default:
10820 drm_err(&dev_priv->drm, "Incorrect port type\n");
10821 return;
10822 }
10823
10824 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10825 }
10826
10827 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10828 struct intel_crtc_state *pipe_config)
10829 {
10830 enum intel_dpll_id id;
10831 u32 temp;
10832
10833 temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10834 id = temp >> (port * 3 + 1);
10835
10836 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
10837 return;
10838
10839 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10840 }
10841
/*
 * Read out which shared PLL drives the given DDI port on HSW/BDW by
 * decoding PORT_CLK_SEL. Nothing is recorded when no clock is selected
 * (or the selection is unrecognized).
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10876
/*
 * Determine which CPU transcoder is driving @crtc and whether it is
 * enabled.  The default pipe->transcoder mapping is fixed, except that a
 * panel transcoder (eDP, or DSI on gen11+) may be routed to this pipe
 * instead; that routing is read back from TRANS_DDI_FUNC_CTL.
 *
 * On success the transcoder's power-domain wakeref is stored in
 * @wakerefs[] and its bit set in @power_domain_mask — the caller owns
 * releasing it.  Returns true when the transcoder's PIPECONF is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* Collect the panel transcoders this platform actually has. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe this panel transcoder feeds. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		/* Override the fixed mapping if the panel transcoder owns us. */
		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	/* The caller must not have grabbed this domain already. */
	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Record the wakeref; released by the caller's cleanup loop. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10977
/*
 * On BXT-class hardware, probe the DSI transcoders (ports A and C) and
 * record in @pipe_config->cpu_transcoder whichever one is enabled and
 * routed to @crtc's pipe.  Wakerefs taken along the way are stored in
 * @wakerefs[] / @power_domain_mask for the caller to release.
 *
 * Returns true when a DSI transcoder is driving this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Port A -> DSI transcoder A, port C -> DSI transcoder C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only claim the transcoder if it is wired to our pipe. */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
11032
/*
 * Read out the DDI port driving @crtc's transcoder, look up the shared
 * DPLL feeding that port (per-platform), verify the PLL's hardware state,
 * and detect the HSW/BDW FDI/PCH transcoder case (DDI E).
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoder A uses port A; anything else uses port B. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
			PORT_A : PORT_B;
	} else {
		/* Non-DSI: the port is encoded in TRANS_DDI_FUNC_CTL. */
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* DPLL readout is platform-specific. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	/* The PLL we just looked up had better be enabled in hardware. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		drm_WARN_ON(&dev_priv->drm,
			    !pll->info->funcs->get_hw_state(dev_priv, pll,
							    &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		/* FDI lane count is stored as (lanes - 1) in FDI_RX_CTL. */
		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
11088
/*
 * Read back the full hardware state of @crtc into @pipe_config on
 * HSW-class and newer platforms: transcoder, DDI/PLL routing, timings,
 * output format, gamma/CSC, linetime, panel fitter, IPS and pixel
 * multiplier.  Each register group is only read while its power domain
 * is enabled; every wakeref taken is tracked in @wakerefs[] /
 * power_domain_mask and released before returning.
 *
 * Returns true when the pipe is active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;
	u32 tmp;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	/* Bail out early if the pipe's power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On BXT-class hardware a DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		/* HSW encodes the output colorspace in PIPECONF. */
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+: gamma/CSC enables live in SKL_BOTTOM_COLOR. */
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* Panel fitter state lives in its own power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		/* PIPE_MULT stores (multiplier - 1). */
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every wakeref taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
11227
11228 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
11229 {
11230 struct drm_i915_private *dev_priv =
11231 to_i915(plane_state->uapi.plane->dev);
11232 const struct drm_framebuffer *fb = plane_state->hw.fb;
11233 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11234 u32 base;
11235
11236 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
11237 base = sg_dma_address(obj->mm.pages->sgl);
11238 else
11239 base = intel_plane_ggtt_offset(plane_state);
11240
11241 return base + plane_state->color_plane[0].offset;
11242 }
11243
11244 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11245 {
11246 int x = plane_state->uapi.dst.x1;
11247 int y = plane_state->uapi.dst.y1;
11248 u32 pos = 0;
11249
11250 if (x < 0) {
11251 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11252 x = -x;
11253 }
11254 pos |= x << CURSOR_X_SHIFT;
11255
11256 if (y < 0) {
11257 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11258 y = -y;
11259 }
11260 pos |= y << CURSOR_Y_SHIFT;
11261
11262 return pos;
11263 }
11264
11265 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11266 {
11267 const struct drm_mode_config *config =
11268 &plane_state->uapi.plane->dev->mode_config;
11269 int width = drm_rect_width(&plane_state->uapi.dst);
11270 int height = drm_rect_height(&plane_state->uapi.dst);
11271
11272 return width > 0 && width <= config->cursor_width &&
11273 height > 0 && height <= config->cursor_height;
11274 }
11275
/*
 * Validate and finalize the cursor's surface setup: pin/compute the GTT
 * mapping, resolve the surface offset, reject panning (cursors must
 * start at 0,0 within the fb), and handle the GMCH 180° rotation quirk.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point; take the integer part. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any residual x/y after alignment would require panning support. */
	if (src_x != 0 || src_y != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point the base at the last pixel for reversed scanout. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
11327
/*
 * Platform-independent cursor plane checks: reject tiled framebuffers,
 * run the generic plane-state check (no scaling allowed), restore the
 * unclipped rectangles that the hardware is actually programmed with,
 * then validate the surface and source coordinates.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Use the unclipped src/dst rectangles, which we program to hw */
	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	/* An invisible cursor needs no coordinate validation. */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}
11365
11366 static unsigned int
11367 i845_cursor_max_stride(struct intel_plane *plane,
11368 u32 pixel_format, u64 modifier,
11369 unsigned int rotation)
11370 {
11371 return 2048;
11372 }
11373
11374 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11375 {
11376 u32 cntl = 0;
11377
11378 if (crtc_state->gamma_enable)
11379 cntl |= CURSOR_GAMMA_ENABLE;
11380
11381 return cntl;
11382 }
11383
11384 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11385 const struct intel_plane_state *plane_state)
11386 {
11387 return CURSOR_ENABLE |
11388 CURSOR_FORMAT_ARGB |
11389 CURSOR_STRIDE(plane_state->color_plane[0].stride);
11390 }
11391
11392 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11393 {
11394 int width = drm_rect_width(&plane_state->uapi.dst);
11395
11396 /*
11397 * 845g/865g are only limited by the width of their cursors,
11398 * the height is arbitrary up to the precision of the register.
11399 */
11400 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11401 }
11402
/*
 * 845g/865g cursor plane check: run the common cursor checks, then
 * validate the platform's size and stride restrictions and precompute
 * the cursor control register value.
 *
 * Returns 0 on success or a negative errno.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		drm_dbg_kms(&i915->drm,
			    "Cursor dimension %dx%d not supported\n",
			    drm_rect_width(&plane_state->uapi.dst),
			    drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/* The computed plane stride must match the fb pitch exactly. */
	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
		    plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Hardware only accepts these four power-of-two strides. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
			    fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11446
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL/invisible.  Register writes are
 * done under the uncore lock with _fw accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable with new cntl. */
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
		intel_de_write_fw(dev_priv, CURSIZE, size);
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);

		/* Cache what we wrote so future updates can skip this path. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11491
11492 static void i845_disable_cursor(struct intel_plane *plane,
11493 const struct intel_crtc_state *crtc_state)
11494 {
11495 i845_update_cursor(plane, crtc_state, NULL);
11496 }
11497
11498 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11499 enum pipe *pipe)
11500 {
11501 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11502 enum intel_display_power_domain power_domain;
11503 intel_wakeref_t wakeref;
11504 bool ret;
11505
11506 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11507 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11508 if (!wakeref)
11509 return false;
11510
11511 ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11512
11513 *pipe = PIPE_A;
11514
11515 intel_display_power_put(dev_priv, power_domain, wakeref);
11516
11517 return ret;
11518 }
11519
11520 static unsigned int
11521 i9xx_cursor_max_stride(struct intel_plane *plane,
11522 u32 pixel_format, u64 modifier,
11523 unsigned int rotation)
11524 {
11525 return plane->base.dev->mode_config.cursor_width * 4;
11526 }
11527
11528 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11529 {
11530 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11531 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11532 u32 cntl = 0;
11533
11534 if (INTEL_GEN(dev_priv) >= 11)
11535 return cntl;
11536
11537 if (crtc_state->gamma_enable)
11538 cntl = MCURSOR_GAMMA_ENABLE;
11539
11540 if (crtc_state->csc_enable)
11541 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11542
11543 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11544 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11545
11546 return cntl;
11547 }
11548
11549 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11550 const struct intel_plane_state *plane_state)
11551 {
11552 struct drm_i915_private *dev_priv =
11553 to_i915(plane_state->uapi.plane->dev);
11554 u32 cntl = 0;
11555
11556 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11557 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11558
11559 switch (drm_rect_width(&plane_state->uapi.dst)) {
11560 case 64:
11561 cntl |= MCURSOR_MODE_64_ARGB_AX;
11562 break;
11563 case 128:
11564 cntl |= MCURSOR_MODE_128_ARGB_AX;
11565 break;
11566 case 256:
11567 cntl |= MCURSOR_MODE_256_ARGB_AX;
11568 break;
11569 default:
11570 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11571 return 0;
11572 }
11573
11574 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11575 cntl |= MCURSOR_ROTATE_180;
11576
11577 return cntl;
11578 }
11579
11580 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11581 {
11582 struct drm_i915_private *dev_priv =
11583 to_i915(plane_state->uapi.plane->dev);
11584 int width = drm_rect_width(&plane_state->uapi.dst);
11585 int height = drm_rect_height(&plane_state->uapi.dst);
11586
11587 if (!intel_cursor_size_ok(plane_state))
11588 return false;
11589
11590 /* Cursor width is limited to a few power-of-two sizes */
11591 switch (width) {
11592 case 256:
11593 case 128:
11594 case 64:
11595 break;
11596 default:
11597 return false;
11598 }
11599
11600 /*
11601 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11602 * height from 8 lines up to the cursor width, when the
11603 * cursor is not rotated. Everything else requires square
11604 * cursors.
11605 */
11606 if (HAS_CUR_FBC(dev_priv) &&
11607 plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11608 if (height < 8 || height > width)
11609 return false;
11610 } else {
11611 if (height != width)
11612 return false;
11613 }
11614
11615 return true;
11616 }
11617
/*
 * i9xx cursor plane check: run the common cursor checks, validate the
 * platform size/stride constraints, reject the CHV pipe C left-edge
 * straddle (hardware bug), and precompute the cursor control value.
 *
 * Returns 0 on success or a negative errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		drm_dbg(&dev_priv->drm,
			"Cursor dimension %dx%d not supported\n",
			drm_rect_width(&plane_state->uapi.dst),
			drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/* The computed plane stride must match the fb pitch exactly. */
	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
		    plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Stride must equal width * cpp; cursors cannot have extra padding. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		drm_dbg_kms(&dev_priv->drm,
			    "Invalid cursor stride (%u) (cursor width %d)\n",
			    fb->pitches[0],
			    drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11677
/*
 * Program the i9xx cursor registers from @plane_state, or disable the
 * cursor when @plane_state is NULL/invisible.  The write ordering below
 * is deliberate — see the in-body comment about register arming.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursor: use CUR_FBC_CTL to set the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
					  fbc_ctl);
		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);

		/* Cache what we wrote so future updates can take the short path. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11746
11747 static void i9xx_disable_cursor(struct intel_plane *plane,
11748 const struct intel_crtc_state *crtc_state)
11749 {
11750 i9xx_update_cursor(plane, crtc_state, NULL);
11751 }
11752
11753 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11754 enum pipe *pipe)
11755 {
11756 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11757 enum intel_display_power_domain power_domain;
11758 intel_wakeref_t wakeref;
11759 bool ret;
11760 u32 val;
11761
11762 /*
11763 * Not 100% correct for planes that can move between pipes,
11764 * but that's only the case for gen2-3 which don't have any
11765 * display power wells.
11766 */
11767 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11768 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11769 if (!wakeref)
11770 return false;
11771
11772 val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
11773
11774 ret = val & MCURSOR_MODE;
11775
11776 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11777 *pipe = plane->pipe;
11778 else
11779 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11780 MCURSOR_PIPE_SELECT_SHIFT;
11781
11782 intel_display_power_put(dev_priv, power_domain, wakeref);
11783
11784 return ret;
11785 }
11786
/* VESA 640x480@72Hz mode, set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11792
11793 struct drm_framebuffer *
11794 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11795 struct drm_mode_fb_cmd2 *mode_cmd)
11796 {
11797 struct intel_framebuffer *intel_fb;
11798 int ret;
11799
11800 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11801 if (!intel_fb)
11802 return ERR_PTR(-ENOMEM);
11803
11804 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11805 if (ret)
11806 goto err;
11807
11808 return &intel_fb->base;
11809
11810 err:
11811 kfree(intel_fb);
11812 return ERR_PTR(ret);
11813 }
11814
11815 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11816 struct drm_crtc *crtc)
11817 {
11818 struct drm_plane *plane;
11819 struct drm_plane_state *plane_state;
11820 int ret, i;
11821
11822 ret = drm_atomic_add_affected_planes(state, crtc);
11823 if (ret)
11824 return ret;
11825
11826 for_each_new_plane_in_state(state, plane, plane_state, i) {
11827 if (plane_state->crtc != crtc)
11828 continue;
11829
11830 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11831 if (ret)
11832 return ret;
11833
11834 drm_atomic_set_fb_for_plane(plane_state, NULL);
11835 }
11836
11837 return 0;
11838 }
11839
/*
 * intel_get_load_detect_pipe - temporarily steal a CRTC for load detection
 * @connector: connector we want to probe
 * @old: cookie filled in with the restore state needed by
 *	 intel_release_load_detect_pipe() to undo everything
 * @ctx: modeset acquire context; the caller is responsible for the
 *	 -EDEADLK backoff/retry dance
 *
 * Finds a CRTC that can drive @connector (reusing the already assigned
 * CRTC if there is one, otherwise the first idle CRTC in the encoder's
 * possible_crtcs mask), commits a minimal mode (load_detect_mode) with
 * all planes disabled, and stashes a duplicated "restore" atomic state
 * in @old for the later release.
 *
 * Return value quirk: returns -EDEADLK when a lock needs to be backed
 * off, otherwise true (1) on success and false (0) on failure. Callers
 * must therefore check for a negative error before testing truthiness.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		/* i tracks the crtc index for the possible_crtcs bitmask */
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/*
	 * Two states: @state carries the load-detect configuration we
	 * commit now, @restore_state a duplicate of the current config
	 * so the release path can put everything back.
	 */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Load detection only needs the pipe, not any plane output */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restore */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Success: ownership of restore_state moves to @old */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK is propagated so the caller can back off and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11998
11999 void intel_release_load_detect_pipe(struct drm_connector *connector,
12000 struct intel_load_detect_pipe *old,
12001 struct drm_modeset_acquire_ctx *ctx)
12002 {
12003 struct intel_encoder *intel_encoder =
12004 intel_attached_encoder(to_intel_connector(connector));
12005 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
12006 struct drm_encoder *encoder = &intel_encoder->base;
12007 struct drm_atomic_state *state = old->restore_state;
12008 int ret;
12009
12010 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
12011 connector->base.id, connector->name,
12012 encoder->base.id, encoder->name);
12013
12014 if (!state)
12015 return;
12016
12017 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
12018 if (ret)
12019 drm_dbg_kms(&i915->drm,
12020 "Couldn't release load detect pipe: %i\n", ret);
12021 drm_atomic_state_put(state);
12022 }
12023
12024 static int i9xx_pll_refclk(struct drm_device *dev,
12025 const struct intel_crtc_state *pipe_config)
12026 {
12027 struct drm_i915_private *dev_priv = to_i915(dev);
12028 u32 dpll = pipe_config->dpll_hw_state.dpll;
12029
12030 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12031 return dev_priv->vbt.lvds_ssc_freq;
12032 else if (HAS_PCH_SPLIT(dev_priv))
12033 return 120000;
12034 else if (!IS_GEN(dev_priv, 2))
12035 return 96000;
12036 else
12037 return 48000;
12038 }
12039
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP0 or FP1 divisor set, whichever the DPLL selected */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2; Pineview uses different field encodings */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored as a one-hot bitfield, hence the ffs() */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS) */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/*
		 * Gen2 path: i830 has no LVDS register; elsewhere LVDS is
		 * only driven from pipe B (pipe == 1).
		 */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* Dual-channel LVDS (clock B powered) uses p2 = 7 */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
12131
12132 int intel_dotclock_calculate(int link_freq,
12133 const struct intel_link_m_n *m_n)
12134 {
12135 /*
12136 * The calculation for the data clock is:
12137 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
12138 * But we want to avoid losing precison if possible, so:
12139 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
12140 *
12141 * and the link clock is simpler:
12142 * link_clock = (m * link_clock) / n
12143 */
12144
12145 if (!m_n->link_n)
12146 return 0;
12147
12148 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
12149 }
12150
/*
 * Read back the pipe clock for an ILK-style PCH configuration: the
 * port_clock comes from the DPLL, while the dotclock is reconstructed
 * from the FDI M/N values so it is valid even without active ports.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
12168
12169 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
12170 struct intel_crtc *crtc)
12171 {
12172 memset(crtc_state, 0, sizeof(*crtc_state));
12173
12174 __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
12175
12176 crtc_state->cpu_transcoder = INVALID_TRANSCODER;
12177 crtc_state->master_transcoder = INVALID_TRANSCODER;
12178 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
12179 crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
12180 crtc_state->scaler_state.scaler_id = -1;
12181 crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
12182 }
12183
12184 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12185 {
12186 struct intel_crtc_state *crtc_state;
12187
12188 crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12189
12190 if (crtc_state)
12191 intel_crtc_state_reset(crtc_state, crtc);
12192
12193 return crtc_state;
12194 }
12195
12196 /* Returns the currently programmed mode of the given encoder. */
12197 struct drm_display_mode *
12198 intel_encoder_current_mode(struct intel_encoder *encoder)
12199 {
12200 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12201 struct intel_crtc_state *crtc_state;
12202 struct drm_display_mode *mode;
12203 struct intel_crtc *crtc;
12204 enum pipe pipe;
12205
12206 if (!encoder->get_hw_state(encoder, &pipe))
12207 return NULL;
12208
12209 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12210
12211 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12212 if (!mode)
12213 return NULL;
12214
12215 crtc_state = intel_crtc_state_alloc(crtc);
12216 if (!crtc_state) {
12217 kfree(mode);
12218 return NULL;
12219 }
12220
12221 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
12222 kfree(crtc_state);
12223 kfree(mode);
12224 return NULL;
12225 }
12226
12227 encoder->get_config(encoder, crtc_state);
12228
12229 intel_mode_from_pipe_config(mode, crtc_state);
12230
12231 kfree(crtc_state);
12232
12233 return mode;
12234 }
12235
12236 static void intel_crtc_destroy(struct drm_crtc *crtc)
12237 {
12238 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12239
12240 drm_crtc_cleanup(crtc);
12241 kfree(intel_crtc);
12242 }
12243
12244 /**
12245 * intel_wm_need_update - Check whether watermarks need updating
12246 * @cur: current plane state
12247 * @new: new plane state
12248 *
12249 * Check current plane state versus the new one to determine whether
12250 * watermarks need to be recalculated.
12251 *
12252 * Returns true or false.
12253 */
12254 static bool intel_wm_need_update(const struct intel_plane_state *cur,
12255 struct intel_plane_state *new)
12256 {
12257 /* Update watermarks on tiling or size changes. */
12258 if (new->uapi.visible != cur->uapi.visible)
12259 return true;
12260
12261 if (!cur->hw.fb || !new->hw.fb)
12262 return false;
12263
12264 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12265 cur->hw.rotation != new->hw.rotation ||
12266 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12267 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12268 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12269 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12270 return true;
12271
12272 return false;
12273 }
12274
12275 static bool needs_scaling(const struct intel_plane_state *state)
12276 {
12277 int src_w = drm_rect_width(&state->uapi.src) >> 16;
12278 int src_h = drm_rect_height(&state->uapi.src) >> 16;
12279 int dst_w = drm_rect_width(&state->uapi.dst);
12280 int dst_h = drm_rect_height(&state->uapi.dst);
12281
12282 return (src_w != dst_w || src_h != dst_h);
12283 }
12284
/*
 * intel_plane_atomic_calc_changes - derive crtc-level flags from a plane change
 * @old_crtc_state: crtc state before the update
 * @crtc_state: crtc state being computed (flags are set on this)
 * @old_plane_state: plane state before the update
 * @plane_state: new plane state
 *
 * Based on the plane's visibility transition, sets the crtc-level
 * bookkeeping flags (update_wm_pre/post, disable_cxsr, disable_lp_wm,
 * fb_bits) and, on gen9+, updates the plane scaler assignment.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Gen9+ non-cursor planes go through the skl scaler bookkeeping */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing to account for */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* Pre-gen5 (except G4X) needs the wm updated before enabling */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		/* ... and after disabling */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
12406
12407 static bool encoders_cloneable(const struct intel_encoder *a,
12408 const struct intel_encoder *b)
12409 {
12410 /* masks could be asymmetric, so check both ways */
12411 return a == b || (a->cloneable & (1 << b->type) &&
12412 b->cloneable & (1 << a->type));
12413 }
12414
12415 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12416 struct intel_crtc *crtc,
12417 struct intel_encoder *encoder)
12418 {
12419 struct intel_encoder *source_encoder;
12420 struct drm_connector *connector;
12421 struct drm_connector_state *connector_state;
12422 int i;
12423
12424 for_each_new_connector_in_state(state, connector, connector_state, i) {
12425 if (connector_state->crtc != &crtc->base)
12426 continue;
12427
12428 source_encoder =
12429 to_intel_encoder(connector_state->best_encoder);
12430 if (!encoders_cloneable(encoder, source_encoder))
12431 return false;
12432 }
12433
12434 return true;
12435 }
12436
/*
 * Pull the planar (NV12 Y/UV) link partner of every updated plane into
 * the atomic state, so linked planes are always programmed together.
 *
 * Returns 0 on success, or a negative error code if a partner plane's
 * state could not be acquired.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* Sanity: the link must be mutual, with exactly one slave */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
12461
/*
 * On gen11+ planar (NV12) formats need a second "Y" plane slaved to the
 * UV master plane. Tear down all stale master/slave links for this crtc,
 * then assign a free Y-capable plane to every NV12 plane in the new
 * state and copy the master's parameters onto the slave.
 *
 * Returns 0 on success, -EINVAL if not enough Y planes are free, or a
 * negative error from acquiring a plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No NV12 planes in the new state -> nothing left to link up */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc to act as slave */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes route chroma upsampling through a fixed partner */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12555
12556 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12557 {
12558 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12559 struct intel_atomic_state *state =
12560 to_intel_atomic_state(new_crtc_state->uapi.state);
12561 const struct intel_crtc_state *old_crtc_state =
12562 intel_atomic_get_old_crtc_state(state, crtc);
12563
12564 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12565 }
12566
12567 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12568 {
12569 const struct drm_display_mode *adjusted_mode =
12570 &crtc_state->hw.adjusted_mode;
12571
12572 if (!crtc_state->hw.enable)
12573 return 0;
12574
12575 return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12576 adjusted_mode->crtc_clock);
12577 }
12578
12579 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12580 const struct intel_cdclk_state *cdclk_state)
12581 {
12582 const struct drm_display_mode *adjusted_mode =
12583 &crtc_state->hw.adjusted_mode;
12584
12585 if (!crtc_state->hw.enable)
12586 return 0;
12587
12588 return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12589 cdclk_state->logical.cdclk);
12590 }
12591
12592 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12593 {
12594 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12595 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12596 const struct drm_display_mode *adjusted_mode =
12597 &crtc_state->hw.adjusted_mode;
12598 u16 linetime_wm;
12599
12600 if (!crtc_state->hw.enable)
12601 return 0;
12602
12603 linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
12604 crtc_state->pixel_rate);
12605
12606 /* Display WA #1135: BXT:ALL GLK:ALL */
12607 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12608 linetime_wm /= 2;
12609
12610 return linetime_wm;
12611 }
12612
/*
 * Compute the linetime (and, on IPS-capable crtcs, the IPS linetime)
 * watermark for @crtc in the new atomic state.
 *
 * Returns 0 on success or a negative error (e.g. -EDEADLK) from
 * acquiring the cdclk state.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (INTEL_GEN(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	/* ips_linetime only matters where IPS is possible at all */
	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}
12638
/*
 * Per-crtc atomic check: compute clocks, color management, watermarks,
 * scalers, IPS and linetime for @crtc's new state. Called once per crtc
 * in the state; the order of the sub-checks matters (e.g. color check
 * before wm computation).
 *
 * Returns 0 on success or the first sub-check's negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except G4X): wm must be refreshed after disabling */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A shared dpll must not already be assigned at compute time */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* intermediate wm without a pipe wm hook makes no sense */
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	return 0;
}
12729
/*
 * Sync each connector's atomic state with its legacy encoder/crtc
 * pointers (used after hardware readout). Takes a connector reference
 * for every connector bound to a crtc, after dropping the reference
 * held by the stale state, mirroring drm core's refcounting rules for
 * connector states with an assigned crtc.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference the old bound state was holding */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound again: take a fresh reference */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12754
12755 static int
12756 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12757 struct intel_crtc_state *pipe_config)
12758 {
12759 struct drm_connector *connector = conn_state->connector;
12760 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12761 const struct drm_display_info *info = &connector->display_info;
12762 int bpp;
12763
12764 switch (conn_state->max_bpc) {
12765 case 6 ... 7:
12766 bpp = 6 * 3;
12767 break;
12768 case 8 ... 9:
12769 bpp = 8 * 3;
12770 break;
12771 case 10 ... 11:
12772 bpp = 10 * 3;
12773 break;
12774 case 12:
12775 bpp = 12 * 3;
12776 break;
12777 default:
12778 return -EINVAL;
12779 }
12780
12781 if (bpp < pipe_config->pipe_bpp) {
12782 drm_dbg_kms(&i915->drm,
12783 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12784 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12785 connector->base.id, connector->name,
12786 bpp, 3 * info->bpc,
12787 3 * conn_state->max_requested_bpc,
12788 pipe_config->pipe_bpp);
12789
12790 pipe_config->pipe_bpp = bpp;
12791 }
12792
12793 return 0;
12794 }
12795
12796 static int
12797 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12798 struct intel_crtc_state *pipe_config)
12799 {
12800 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12801 struct drm_atomic_state *state = pipe_config->uapi.state;
12802 struct drm_connector *connector;
12803 struct drm_connector_state *connector_state;
12804 int bpp, i;
12805
12806 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12807 IS_CHERRYVIEW(dev_priv)))
12808 bpp = 10*3;
12809 else if (INTEL_GEN(dev_priv) >= 5)
12810 bpp = 12*3;
12811 else
12812 bpp = 8*3;
12813
12814 pipe_config->pipe_bpp = bpp;
12815
12816 /* Clamp display bpp to connector max bpp */
12817 for_each_new_connector_in_state(state, connector, connector_state, i) {
12818 int ret;
12819
12820 if (connector_state->crtc != &crtc->base)
12821 continue;
12822
12823 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12824 if (ret)
12825 return ret;
12826 }
12827
12828 return 0;
12829 }
12830
/* Dump the hardware (crtc_*) timings of @mode to the kms debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
12843
/* Dump one M/N link configuration (labelled by @id) to the debug log. */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
12857
/* Log an HDMI infoframe, skipping the work when KMS debugging is off. */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12867
/* Log a DP VSC SDP packet, skipping the work when KMS debugging is off. */
static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
		      const struct drm_dp_vsc_sdp *vsc)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
}
12877
/*
 * Map each INTEL_OUTPUT_* enum value to its name for debug output;
 * indexed by the enum value itself via the OUTPUT_TYPE() helper.
 */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12896
/*
 * Format the set bits of @output_types into @buf as a comma-separated
 * list of output type names. Output is silently truncated if @len is
 * too small; any bits without a name trip the WARN at the end.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* comma-separate after the first entry */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* r >= len means snprintf truncated: stop appending */
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* Any bit still set had no entry in output_type_str[] */
	WARN_ON_ONCE(output_types != 0);
}
12923
/* Human-readable names for intel_output_format values, indexed by enum value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12930
12931 static const char *output_formats(enum intel_output_format format)
12932 {
12933 if (format >= ARRAY_SIZE(output_format_str))
12934 format = INTEL_OUTPUT_FORMAT_INVALID;
12935 return output_format_str[format];
12936 }
12937
/* Dump a plane's fb, visibility, rotation, scaler and src/dst rects to dmesg. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No framebuffer attached: nothing more than visibility to report. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src uses the fixed-point rect format, dst is in integer pixels */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
12967
/*
 * Dump the full crtc state (@pipe_config) to the kernel log for debugging.
 * @context is a caller-supplied tag appended to the first line.  If @state
 * is non-NULL, the states of all planes on this pipe are dumped as well.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has no interesting state beyond its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	/* Link m/n values, only meaningful for FDI resp. DP outputs. */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* second set of m/n used for downclocked refresh (DRRS) */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* Dump each infoframe/SDP whose enable bit is set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): GAMUT_METADATA also dumps infoframes.drm -- appears
	 * intentional (same payload sent via a different packet type on some
	 * platforms), confirm against intel_hdmi infoframe readout.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	/* Pipe scalers exist on gen9+ only. */
	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* Panel fitter state: GMCH-style vs PCH-style depending on platform. */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM unit instead of the CSC mode register. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	/* Dump the new state of every plane belonging to this pipe. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
13107
/*
 * Verify that no digital port is claimed by more than one encoder, and
 * that MST and SST/HDMI are not mixed on the same port.  Returns false
 * if the requested configuration is invalid.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state in this commit, else the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
13176
/*
 * Sync uapi state into hw state for properties that may change without a
 * full modeset (currently only the color management blobs).
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
13182
/* Copy the full uapi crtc state (enable/active/modes + blobs) into hw state. */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
13192
/*
 * Copy the hw crtc state back into the uapi state so userspace observes
 * the values actually programmed.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Also refreshes the uapi mode blob; failure here is unexpected. */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
13210
/*
 * Reset @crtc_state to a freshly-allocated state ahead of config
 * computation, while carrying over the fields (uapi state, scaler and
 * dpll state, crc enable, and watermarks on gmch-style platforms) that
 * must survive.  Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* these platforms track watermarks in the crtc state */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Overwrite the caller's state wholesale with the cleared copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
13248
/*
 * Compute the pipe configuration for a modeset: sanitize sync flags,
 * derive the base pipe bpp and source size, collect output types, then
 * run the encoder and crtc .compute_config() hooks, retrying once if the
 * crtc asks for it (RETRY).  Returns 0, -EDEADLK for lock retry, or a
 * negative error code.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default transcoder assignment: same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is a normal lock-retry, don't log it. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* A single retry with reduced bandwidth is allowed; a second RETRY
	 * would mean the computation doesn't converge. */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
13393
13394 static int
13395 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
13396 {
13397 struct intel_atomic_state *state =
13398 to_intel_atomic_state(crtc_state->uapi.state);
13399 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13400 struct drm_connector_state *conn_state;
13401 struct drm_connector *connector;
13402 int i;
13403
13404 for_each_new_connector_in_state(&state->base, connector,
13405 conn_state, i) {
13406 struct intel_encoder *encoder =
13407 to_intel_encoder(conn_state->best_encoder);
13408 int ret;
13409
13410 if (conn_state->crtc != &crtc->base ||
13411 !encoder->compute_config_late)
13412 continue;
13413
13414 ret = encoder->compute_config_late(encoder, crtc_state,
13415 conn_state);
13416 if (ret)
13417 return ret;
13418 }
13419
13420 return 0;
13421 }
13422
/*
 * Compare two clocks (in kHz) with tolerance: they match if equal, or if
 * their difference is below 5% of their sum.  Zero clocks never fuzzily
 * match a non-zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* 100 + delta*100/(clock1+clock2) < 105  <=>  delta < 5% of the sum */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
13440
/*
 * Compare two m/n ratios.  With @exact, values must match bit for bit;
 * otherwise the ratios are brought to a comparable scale by doubling the
 * smaller n, and the m values are compared with clock-style fuzziness.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	/* Zero values can't be scaled or meaningfully ratio-compared. */
	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* The doublings below must not overflow the int range. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* n values must now coincide, i.e. differed by a power of two. */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
13471
13472 static bool
13473 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13474 const struct intel_link_m_n *m2_n2,
13475 bool exact)
13476 {
13477 return m_n->tu == m2_n2->tu &&
13478 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13479 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13480 intel_compare_m_n(m_n->link_m, m_n->link_n,
13481 m2_n2->link_m, m2_n2->link_n, exact);
13482 }
13483
13484 static bool
13485 intel_compare_infoframe(const union hdmi_infoframe *a,
13486 const union hdmi_infoframe *b)
13487 {
13488 return memcmp(a, b, sizeof(*a)) == 0;
13489 }
13490
13491 static bool
13492 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
13493 const struct drm_dp_vsc_sdp *b)
13494 {
13495 return memcmp(a, b, sizeof(*a)) == 0;
13496 }
13497
13498 static void
13499 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13500 bool fastset, const char *name,
13501 const union hdmi_infoframe *a,
13502 const union hdmi_infoframe *b)
13503 {
13504 if (fastset) {
13505 if (!drm_debug_enabled(DRM_UT_KMS))
13506 return;
13507
13508 drm_dbg_kms(&dev_priv->drm,
13509 "fastset mismatch in %s infoframe\n", name);
13510 drm_dbg_kms(&dev_priv->drm, "expected:\n");
13511 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13512 drm_dbg_kms(&dev_priv->drm, "found:\n");
13513 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13514 } else {
13515 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
13516 drm_err(&dev_priv->drm, "expected:\n");
13517 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13518 drm_err(&dev_priv->drm, "found:\n");
13519 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
13520 }
13521 }
13522
13523 static void
13524 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
13525 bool fastset, const char *name,
13526 const struct drm_dp_vsc_sdp *a,
13527 const struct drm_dp_vsc_sdp *b)
13528 {
13529 if (fastset) {
13530 if (!drm_debug_enabled(DRM_UT_KMS))
13531 return;
13532
13533 drm_dbg_kms(&dev_priv->drm,
13534 "fastset mismatch in %s dp sdp\n", name);
13535 drm_dbg_kms(&dev_priv->drm, "expected:\n");
13536 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
13537 drm_dbg_kms(&dev_priv->drm, "found:\n");
13538 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
13539 } else {
13540 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
13541 drm_err(&dev_priv->drm, "expected:\n");
13542 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
13543 drm_err(&dev_priv->drm, "found:\n");
13544 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
13545 }
13546 }
13547
/*
 * Report a pipe config mismatch in field @name.  During fastset checks
 * this is only a debug message (a full modeset will be forced instead);
 * otherwise a mismatch indicates a driver bug and is logged as an error.
 * The printf-style @format describes the expected/found values.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	/* %pV expands the caller's format/args inside our own format. */
	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
13570
13571 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13572 {
13573 if (i915_modparams.fastboot != -1)
13574 return i915_modparams.fastboot;
13575
13576 /* Enable fastboot by default on Skylake and newer */
13577 if (INTEL_GEN(dev_priv) >= 9)
13578 return true;
13579
13580 /* Enable fastboot by default on VLV and CHV */
13581 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13582 return true;
13583
13584 /* Disabled by default on all others */
13585 return false;
13586 }
13587
13588 static bool
13589 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13590 const struct intel_crtc_state *pipe_config,
13591 bool fastset)
13592 {
13593 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13594 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13595 bool ret = true;
13596 u32 bp_gamma = 0;
13597 bool fixup_inherited = fastset &&
13598 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
13599 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13600
13601 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13602 drm_dbg_kms(&dev_priv->drm,
13603 "initial modeset and fastboot not set\n");
13604 ret = false;
13605 }
13606
13607 #define PIPE_CONF_CHECK_X(name) do { \
13608 if (current_config->name != pipe_config->name) { \
13609 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13610 "(expected 0x%08x, found 0x%08x)", \
13611 current_config->name, \
13612 pipe_config->name); \
13613 ret = false; \
13614 } \
13615 } while (0)
13616
13617 #define PIPE_CONF_CHECK_I(name) do { \
13618 if (current_config->name != pipe_config->name) { \
13619 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13620 "(expected %i, found %i)", \
13621 current_config->name, \
13622 pipe_config->name); \
13623 ret = false; \
13624 } \
13625 } while (0)
13626
13627 #define PIPE_CONF_CHECK_BOOL(name) do { \
13628 if (current_config->name != pipe_config->name) { \
13629 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13630 "(expected %s, found %s)", \
13631 yesno(current_config->name), \
13632 yesno(pipe_config->name)); \
13633 ret = false; \
13634 } \
13635 } while (0)
13636
13637 /*
13638 * Checks state where we only read out the enabling, but not the entire
13639 * state itself (like full infoframes or ELD for audio). These states
13640 * require a full modeset on bootup to fix up.
13641 */
13642 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13643 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13644 PIPE_CONF_CHECK_BOOL(name); \
13645 } else { \
13646 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13647 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13648 yesno(current_config->name), \
13649 yesno(pipe_config->name)); \
13650 ret = false; \
13651 } \
13652 } while (0)
13653
13654 #define PIPE_CONF_CHECK_P(name) do { \
13655 if (current_config->name != pipe_config->name) { \
13656 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13657 "(expected %p, found %p)", \
13658 current_config->name, \
13659 pipe_config->name); \
13660 ret = false; \
13661 } \
13662 } while (0)
13663
13664 #define PIPE_CONF_CHECK_M_N(name) do { \
13665 if (!intel_compare_link_m_n(&current_config->name, \
13666 &pipe_config->name,\
13667 !fastset)) { \
13668 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13669 "(expected tu %i gmch %i/%i link %i/%i, " \
13670 "found tu %i, gmch %i/%i link %i/%i)", \
13671 current_config->name.tu, \
13672 current_config->name.gmch_m, \
13673 current_config->name.gmch_n, \
13674 current_config->name.link_m, \
13675 current_config->name.link_n, \
13676 pipe_config->name.tu, \
13677 pipe_config->name.gmch_m, \
13678 pipe_config->name.gmch_n, \
13679 pipe_config->name.link_m, \
13680 pipe_config->name.link_n); \
13681 ret = false; \
13682 } \
13683 } while (0)
13684
13685 /* This is required for BDW+ where there is only one set of registers for
13686 * switching between high and low RR.
13687 * This macro can be used whenever a comparison has to be made between one
13688 * hw state and multiple sw state variables.
13689 */
13690 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13691 if (!intel_compare_link_m_n(&current_config->name, \
13692 &pipe_config->name, !fastset) && \
13693 !intel_compare_link_m_n(&current_config->alt_name, \
13694 &pipe_config->name, !fastset)) { \
13695 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13696 "(expected tu %i gmch %i/%i link %i/%i, " \
13697 "or tu %i gmch %i/%i link %i/%i, " \
13698 "found tu %i, gmch %i/%i link %i/%i)", \
13699 current_config->name.tu, \
13700 current_config->name.gmch_m, \
13701 current_config->name.gmch_n, \
13702 current_config->name.link_m, \
13703 current_config->name.link_n, \
13704 current_config->alt_name.tu, \
13705 current_config->alt_name.gmch_m, \
13706 current_config->alt_name.gmch_n, \
13707 current_config->alt_name.link_m, \
13708 current_config->alt_name.link_n, \
13709 pipe_config->name.tu, \
13710 pipe_config->name.gmch_m, \
13711 pipe_config->name.gmch_n, \
13712 pipe_config->name.link_m, \
13713 pipe_config->name.link_n); \
13714 ret = false; \
13715 } \
13716 } while (0)
13717
13718 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13719 if ((current_config->name ^ pipe_config->name) & (mask)) { \
13720 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13721 "(%x) (expected %i, found %i)", \
13722 (mask), \
13723 current_config->name & (mask), \
13724 pipe_config->name & (mask)); \
13725 ret = false; \
13726 } \
13727 } while (0)
13728
13729 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13730 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13731 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13732 "(expected %i, found %i)", \
13733 current_config->name, \
13734 pipe_config->name); \
13735 ret = false; \
13736 } \
13737 } while (0)
13738
13739 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13740 if (!intel_compare_infoframe(&current_config->infoframes.name, \
13741 &pipe_config->infoframes.name)) { \
13742 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13743 &current_config->infoframes.name, \
13744 &pipe_config->infoframes.name); \
13745 ret = false; \
13746 } \
13747 } while (0)
13748
13749 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
13750 if (!current_config->has_psr && !pipe_config->has_psr && \
13751 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
13752 &pipe_config->infoframes.name)) { \
13753 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
13754 &current_config->infoframes.name, \
13755 &pipe_config->infoframes.name); \
13756 ret = false; \
13757 } \
13758 } while (0)
13759
13760 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13761 if (current_config->name1 != pipe_config->name1) { \
13762 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13763 "(expected %i, found %i, won't compare lut values)", \
13764 current_config->name1, \
13765 pipe_config->name1); \
13766 ret = false;\
13767 } else { \
13768 if (!intel_color_lut_equal(current_config->name2, \
13769 pipe_config->name2, pipe_config->name1, \
13770 bit_precision)) { \
13771 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13772 "hw_state doesn't match sw_state"); \
13773 ret = false; \
13774 } \
13775 } \
13776 } while (0)
13777
13778 #define PIPE_CONF_QUIRK(quirk) \
13779 ((current_config->quirks | pipe_config->quirks) & (quirk))
13780
13781 PIPE_CONF_CHECK_I(cpu_transcoder);
13782
13783 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13784 PIPE_CONF_CHECK_I(fdi_lanes);
13785 PIPE_CONF_CHECK_M_N(fdi_m_n);
13786
13787 PIPE_CONF_CHECK_I(lane_count);
13788 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13789
13790 if (INTEL_GEN(dev_priv) < 8) {
13791 PIPE_CONF_CHECK_M_N(dp_m_n);
13792
13793 if (current_config->has_drrs)
13794 PIPE_CONF_CHECK_M_N(dp_m2_n2);
13795 } else
13796 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13797
13798 PIPE_CONF_CHECK_X(output_types);
13799
13800 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13801 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13802 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13803 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13804 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13805 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13806
13807 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13808 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13809 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13810 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13811 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13812 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13813
13814 PIPE_CONF_CHECK_I(pixel_multiplier);
13815 PIPE_CONF_CHECK_I(output_format);
13816 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13817 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13818 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13819 PIPE_CONF_CHECK_BOOL(limited_color_range);
13820
13821 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13822 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13823 PIPE_CONF_CHECK_BOOL(has_infoframe);
13824 PIPE_CONF_CHECK_BOOL(fec_enable);
13825
13826 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13827
13828 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13829 DRM_MODE_FLAG_INTERLACE);
13830
13831 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13832 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13833 DRM_MODE_FLAG_PHSYNC);
13834 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13835 DRM_MODE_FLAG_NHSYNC);
13836 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13837 DRM_MODE_FLAG_PVSYNC);
13838 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13839 DRM_MODE_FLAG_NVSYNC);
13840 }
13841
13842 PIPE_CONF_CHECK_X(gmch_pfit.control);
13843 /* pfit ratios are autocomputed by the hw on gen4+ */
13844 if (INTEL_GEN(dev_priv) < 4)
13845 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13846 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13847
13848 /*
13849 * Changing the EDP transcoder input mux
13850 * (A_ONOFF vs. A_ON) requires a full modeset.
13851 */
13852 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13853
13854 if (!fastset) {
13855 PIPE_CONF_CHECK_I(pipe_src_w);
13856 PIPE_CONF_CHECK_I(pipe_src_h);
13857
13858 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13859 if (current_config->pch_pfit.enabled) {
13860 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
13861 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
13862 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
13863 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
13864 }
13865
13866 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13867 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13868
13869 PIPE_CONF_CHECK_X(gamma_mode);
13870 if (IS_CHERRYVIEW(dev_priv))
13871 PIPE_CONF_CHECK_X(cgm_mode);
13872 else
13873 PIPE_CONF_CHECK_X(csc_mode);
13874 PIPE_CONF_CHECK_BOOL(gamma_enable);
13875 PIPE_CONF_CHECK_BOOL(csc_enable);
13876
13877 PIPE_CONF_CHECK_I(linetime);
13878 PIPE_CONF_CHECK_I(ips_linetime);
13879
13880 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13881 if (bp_gamma)
13882 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13883 }
13884
13885 PIPE_CONF_CHECK_BOOL(double_wide);
13886
13887 PIPE_CONF_CHECK_P(shared_dpll);
13888 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13889 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13890 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13891 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13892 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13893 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13894 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13895 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13896 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13897 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13898 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13899 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13900 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13901 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13902 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13903 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13904 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13905 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13906 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13907 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13908 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13909 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13910 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13911 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13912 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13913 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13914 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13915 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13916 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13917 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13918 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13919
13920 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13921 PIPE_CONF_CHECK_X(dsi_pll.div);
13922
13923 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13924 PIPE_CONF_CHECK_I(pipe_bpp);
13925
13926 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13927 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13928
13929 PIPE_CONF_CHECK_I(min_voltage_level);
13930
13931 PIPE_CONF_CHECK_X(infoframes.enable);
13932 PIPE_CONF_CHECK_X(infoframes.gcp);
13933 PIPE_CONF_CHECK_INFOFRAME(avi);
13934 PIPE_CONF_CHECK_INFOFRAME(spd);
13935 PIPE_CONF_CHECK_INFOFRAME(hdmi);
13936 PIPE_CONF_CHECK_INFOFRAME(drm);
13937 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
13938
13939 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
13940 PIPE_CONF_CHECK_I(master_transcoder);
13941
13942 PIPE_CONF_CHECK_I(dsc.compression_enable);
13943 PIPE_CONF_CHECK_I(dsc.dsc_split);
13944 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
13945
13946 PIPE_CONF_CHECK_I(mst_master_transcoder);
13947
13948 #undef PIPE_CONF_CHECK_X
13949 #undef PIPE_CONF_CHECK_I
13950 #undef PIPE_CONF_CHECK_BOOL
13951 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13952 #undef PIPE_CONF_CHECK_P
13953 #undef PIPE_CONF_CHECK_FLAGS
13954 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13955 #undef PIPE_CONF_CHECK_COLOR_LUT
13956 #undef PIPE_CONF_QUIRK
13957
13958 return ret;
13959 }
13960
/*
 * Sanity-check a read-back pipe config: when a PCH encoder (FDI link) is
 * in use, the dotclock implied by the FDI M/N values must agree (fuzzily)
 * with the dotclock the encoder reported in the adjusted mode.
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		/* Dotclock reconstructed from the FDI link frequency and M/N. */
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		drm_WARN(&dev_priv->drm,
			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
			 fdi_dotclock, dotclock);
	}
}
13979
/*
 * Compare the watermark and DDB allocation state read back from the
 * hardware against the software state we just committed (gen9+ only).
 * Any mismatch is logged with drm_err(); the function never fails.
 *
 * The hw snapshot is heap-allocated because skl_hw_state is large
 * (two per-plane DDB arrays plus a full skl_pipe_wm).
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	u8 hw_enabled_slices;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	/* SKL-style watermarks only exist on gen9+; inactive pipes have nothing to verify. */
	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	/*
	 * NOTE(review): hw->ddb_uv is filled in here but never compared
	 * against sw state below — only ddb_y is checked. Confirm whether
	 * a UV-plane DDB check is missing or intentionally omitted.
	 */
	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice usage is only tracked/verified on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->enabled_dbuf_slices_mask,
			hw_enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			/*
			 * Level 0 may legitimately contain either the normal
			 * WM0 or the SAGV WM0, so accept both there.
			 */
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1, level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1,
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe), plane + 1,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated it's ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): this guard is `if (1)`, so the cursor is always
	 * checked despite the comment above — presumably the condition was
	 * removed at some point. Confirm whether the block (and comment)
	 * should be flattened.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			/* As above: level 0 may match either WM0 or SAGV WM0. */
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe),
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe),
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
14127
/*
 * Verify every connector in @state that is (or is becoming) attached to
 * @crtc. @crtc may be NULL (disabled-state verification), in which case
 * only connectors with no crtc are checked and no crtc state is passed
 * down to intel_connector_verify_state().
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/*
		 * NOTE(review): when crtc == NULL this relies on &crtc->base
		 * evaluating to NULL (base being the first member) rather
		 * than checking crtc first — works in practice, but strictly
		 * it is member access through a null pointer; confirm.
		 */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		/* The atomic best_encoder must agree with the legacy pointer. */
		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
				"connector's atomic encoder doesn't match legacy encoder\n");
	}
}
14152
/*
 * Cross-check every encoder's software enable state against the
 * connectors in @state and against the encoder's own hardware state.
 * Encoders not referenced by any old or new connector state are skipped.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector in this state (old or new) references
		 *        the encoder, so it is part of this commit.
		 * enabled: a *new* connector state references it, i.e. it
		 *          should be driving a crtc after the commit.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
					"connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder untouched by this commit — nothing to verify. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
				"encoder's enabled state mismatch "
				"(expected %i, found %i)\n",
				!!encoder->base.crtc, enabled);

		/* A detached encoder must also be off in the hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
					"encoder detached but still enabled on pipe %c.\n",
					pipe_name(pipe));
		}
	}
}
14201
/*
 * Read the pipe configuration back from the hardware and compare it
 * against the software state we just committed.
 *
 * The old crtc state is no longer needed at this point, so it is
 * destroyed and reused in place (as @pipe_config) to hold the hw
 * readout — this avoids a second large allocation.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	bool active;

	/* Recycle old_crtc_state as a clean buffer for the hw readout,
	 * preserving only its back-pointer to the atomic state. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Each encoder on the crtc must agree with the crtc's active state
	 * and be wired to the right pipe; active encoders also contribute
	 * their config to the readout. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Full config comparison only makes sense for an active pipe. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
14269
14270 static void
14271 intel_verify_planes(struct intel_atomic_state *state)
14272 {
14273 struct intel_plane *plane;
14274 const struct intel_plane_state *plane_state;
14275 int i;
14276
14277 for_each_new_intel_plane_in_state(state, plane,
14278 plane_state, i)
14279 assert_plane(plane, plane_state->planar_slave ||
14280 plane_state->uapi.visible);
14281 }
14282
/*
 * Verify one shared DPLL's software tracking against its hardware state.
 *
 * With @crtc == NULL (disabled-state verification) only the global
 * bookkeeping (active_mask vs. crtc_mask) is checked; otherwise the
 * given crtc's membership in both masks is verified, plus the recorded
 * hw state against the actual registers.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* active crtcs must be a subset of the crtcs holding a reference */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* The crtc's presence in active_mask must track its active state. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	/* Active or not, the crtc must hold a reference on this pll. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* For a powered-up pll the cached hw state must match the registers. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
14337
14338 static void
14339 verify_shared_dpll_state(struct intel_crtc *crtc,
14340 struct intel_crtc_state *old_crtc_state,
14341 struct intel_crtc_state *new_crtc_state)
14342 {
14343 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14344
14345 if (new_crtc_state->shared_dpll)
14346 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14347
14348 if (old_crtc_state->shared_dpll &&
14349 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14350 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14351 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14352
14353 I915_STATE_WARN(pll->active_mask & crtc_mask,
14354 "pll active mismatch (didn't expect pipe %c in active mask)\n",
14355 pipe_name(crtc->pipe));
14356 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14357 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
14358 pipe_name(crtc->pipe));
14359 }
14360 }
14361
14362 static void
14363 intel_modeset_verify_crtc(struct intel_crtc *crtc,
14364 struct intel_atomic_state *state,
14365 struct intel_crtc_state *old_crtc_state,
14366 struct intel_crtc_state *new_crtc_state)
14367 {
14368 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14369 return;
14370
14371 verify_wm_state(crtc, new_crtc_state);
14372 verify_connector_state(state, crtc);
14373 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14374 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14375 }
14376
14377 static void
14378 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14379 {
14380 int i;
14381
14382 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
14383 verify_single_dpll_state(dev_priv,
14384 &dev_priv->dpll.shared_dplls[i],
14385 NULL, NULL);
14386 }
14387
/*
 * Verify everything that is not tied to a specific enabled crtc:
 * encoder state, connectors with no crtc, and the shared DPLLs'
 * global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
14396
/*
 * Refresh the vblank timestamping constants and the per-platform
 * scanline counter offset for a crtc whose timings just changed.
 * See the long comment below for why scanline_offset varies.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* Interlaced modes count fields, so halve vtotal. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
14449
14450 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14451 {
14452 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14453 struct intel_crtc_state *new_crtc_state;
14454 struct intel_crtc *crtc;
14455 int i;
14456
14457 if (!dev_priv->display.crtc_compute_clock)
14458 return;
14459
14460 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14461 if (!needs_modeset(new_crtc_state))
14462 continue;
14463
14464 intel_release_shared_dplls(state, crtc);
14465 }
14466 }
14467
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled; more than two
		 * never need further tracking for this workaround. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pull every crtc into the state so the w/a pipe can be
		 * (re)set consistently on all of them. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* Only count crtcs that stay enabled without a modeset. */
		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* One crtc already enabled: the newly enabled one waits on it.
	 * None enabled but two coming up: the second waits on the first. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
14528
14529 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
14530 u8 active_pipes)
14531 {
14532 const struct intel_crtc_state *crtc_state;
14533 struct intel_crtc *crtc;
14534 int i;
14535
14536 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14537 if (crtc_state->hw.active)
14538 active_pipes |= BIT(crtc->pipe);
14539 else
14540 active_pipes &= ~BIT(crtc->pipe);
14541 }
14542
14543 return active_pipes;
14544 }
14545
/*
 * Extra check-phase work needed only when the atomic state contains a
 * full modeset: update the active-pipe tracking, take the global state
 * lock if the set of active pipes changes, recompute cdclk, release
 * stale shared DPLLs and apply the HSW planes workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;

	state->modeset = true;
	state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);

	state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;

	/* Changing which pipes are active affects global resources, so
	 * serialize against other commits via the global state lock. */
	if (state->active_pipe_changes) {
		ret = _intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}
14573
14574 /*
14575 * Handle calculation of various watermark data at the end of the atomic check
14576 * phase. The code here should be run after the per-crtc and per-plane 'check'
14577 * handlers to ensure that all derived state has been updated.
14578 */
14579 static int calc_watermark_data(struct intel_atomic_state *state)
14580 {
14581 struct drm_device *dev = state->base.dev;
14582 struct drm_i915_private *dev_priv = to_i915(dev);
14583
14584 /* Is there platform-specific watermark information to calculate? */
14585 if (dev_priv->display.compute_global_watermarks)
14586 return dev_priv->display.compute_global_watermarks(state);
14587
14588 return 0;
14589 }
14590
14591 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14592 struct intel_crtc_state *new_crtc_state)
14593 {
14594 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14595 return;
14596
14597 new_crtc_state->uapi.mode_changed = false;
14598 new_crtc_state->update_pipe = true;
14599 }
14600
/*
 * When a fastset was chosen over a full modeset, carry selected fields
 * of the old state forward into the new one (see comment below).
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
14617
14618 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14619 struct intel_crtc *crtc,
14620 u8 plane_ids_mask)
14621 {
14622 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14623 struct intel_plane *plane;
14624
14625 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14626 struct intel_plane_state *plane_state;
14627
14628 if ((plane_ids_mask & BIT(plane->id)) == 0)
14629 continue;
14630
14631 plane_state = intel_atomic_get_plane_state(state, plane);
14632 if (IS_ERR(plane_state))
14633 return PTR_ERR(plane_state);
14634 }
14635
14636 return 0;
14637 }
14638
14639 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14640 {
14641 /* See {hsw,vlv,ivb}_plane_ratio() */
14642 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14643 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14644 IS_IVYBRIDGE(dev_priv);
14645 }
14646
/*
 * Check-phase handling for all planes in @state: link NV12 Y/UV planes,
 * run per-plane atomic checks, pull extra planes into the state where
 * the active-plane count affects min cdclk, and finally compute each
 * plane's minimum cdclk (setting *need_cdclk_calc if a cdclk
 * recalculation is required).
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor doesn't count towards the plane ratios. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only the *count* of active planes matters for the ratios. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	return 0;
}
14711
14712 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14713 {
14714 struct intel_crtc_state *crtc_state;
14715 struct intel_crtc *crtc;
14716 int i;
14717
14718 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14719 int ret = intel_crtc_atomic_check(state, crtc);
14720 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14721 if (ret) {
14722 drm_dbg_atomic(&i915->drm,
14723 "[CRTC:%d:%s] atomic driver check failed\n",
14724 crtc->base.base.id, crtc->base.name);
14725 return ret;
14726 }
14727 }
14728
14729 return 0;
14730 }
14731
14732 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14733 u8 transcoders)
14734 {
14735 const struct intel_crtc_state *new_crtc_state;
14736 struct intel_crtc *crtc;
14737 int i;
14738
14739 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14740 if (new_crtc_state->hw.enable &&
14741 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14742 needs_modeset(new_crtc_state))
14743 return true;
14744 }
14745
14746 return false;
14747 }
14748
14749 /**
14750 * intel_atomic_check - validate state object
14751 * @dev: drm device
14752 * @_state: state to validate
14753 */
14754 static int intel_atomic_check(struct drm_device *dev,
14755 struct drm_atomic_state *_state)
14756 {
14757 struct drm_i915_private *dev_priv = to_i915(dev);
14758 struct intel_atomic_state *state = to_intel_atomic_state(_state);
14759 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14760 struct intel_cdclk_state *new_cdclk_state;
14761 struct intel_crtc *crtc;
14762 int ret, i;
14763 bool any_ms = false;
14764
14765 /* Catch I915_MODE_FLAG_INHERITED */
14766 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14767 new_crtc_state, i) {
14768 if (new_crtc_state->uapi.mode.private_flags !=
14769 old_crtc_state->uapi.mode.private_flags)
14770 new_crtc_state->uapi.mode_changed = true;
14771 }
14772
14773 ret = drm_atomic_helper_check_modeset(dev, &state->base);
14774 if (ret)
14775 goto fail;
14776
14777 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14778 new_crtc_state, i) {
14779 if (!needs_modeset(new_crtc_state)) {
14780 /* Light copy */
14781 intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14782
14783 continue;
14784 }
14785
14786 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14787 if (ret)
14788 goto fail;
14789
14790 if (!new_crtc_state->hw.enable)
14791 continue;
14792
14793 ret = intel_modeset_pipe_config(new_crtc_state);
14794 if (ret)
14795 goto fail;
14796 }
14797
14798 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14799 new_crtc_state, i) {
14800 if (!needs_modeset(new_crtc_state))
14801 continue;
14802
14803 ret = intel_modeset_pipe_config_late(new_crtc_state);
14804 if (ret)
14805 goto fail;
14806
14807 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14808 }
14809
14810 /**
14811 * Check if fastset is allowed by external dependencies like other
14812 * pipes and transcoders.
14813 *
14814 * Right now it only forces a fullmodeset when the MST master
14815 * transcoder did not changed but the pipe of the master transcoder
14816 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14817 * in case of port synced crtcs, if one of the synced crtcs
14818 * needs a full modeset, all other synced crtcs should be
14819 * forced a full modeset.
14820 */
14821 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14822 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
14823 continue;
14824
14825 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14826 enum transcoder master = new_crtc_state->mst_master_transcoder;
14827
14828 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14829 new_crtc_state->uapi.mode_changed = true;
14830 new_crtc_state->update_pipe = false;
14831 }
14832 }
14833
14834 if (is_trans_port_sync_mode(new_crtc_state)) {
14835 u8 trans = new_crtc_state->sync_mode_slaves_mask;
14836
14837 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14838 trans |= BIT(new_crtc_state->master_transcoder);
14839
14840 if (intel_cpu_transcoders_need_modeset(state, trans)) {
14841 new_crtc_state->uapi.mode_changed = true;
14842 new_crtc_state->update_pipe = false;
14843 }
14844 }
14845 }
14846
14847 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14848 new_crtc_state, i) {
14849 if (needs_modeset(new_crtc_state)) {
14850 any_ms = true;
14851 continue;
14852 }
14853
14854 if (!new_crtc_state->update_pipe)
14855 continue;
14856
14857 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14858 }
14859
14860 if (any_ms && !check_digital_port_conflicts(state)) {
14861 drm_dbg_kms(&dev_priv->drm,
14862 "rejecting conflicting digital port configuration\n");
14863 ret = EINVAL;
14864 goto fail;
14865 }
14866
14867 ret = drm_dp_mst_atomic_check(&state->base);
14868 if (ret)
14869 goto fail;
14870
14871 ret = intel_atomic_check_planes(state, &any_ms);
14872 if (ret)
14873 goto fail;
14874
14875 new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
14876 if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
14877 any_ms = true;
14878
14879 /*
14880 * distrust_bios_wm will force a full dbuf recomputation
14881 * but the hardware state will only get updated accordingly
14882 * if state->modeset==true. Hence distrust_bios_wm==true &&
14883 * state->modeset==false is an invalid combination which
14884 * would cause the hardware and software dbuf state to get
14885 * out of sync. We must prevent that.
14886 *
14887 * FIXME clean up this mess and introduce better
14888 * state tracking for dbuf.
14889 */
14890 if (dev_priv->wm.distrust_bios_wm)
14891 any_ms = true;
14892
14893 if (any_ms) {
14894 ret = intel_modeset_checks(state);
14895 if (ret)
14896 goto fail;
14897 }
14898
14899 ret = intel_atomic_check_crtcs(state);
14900 if (ret)
14901 goto fail;
14902
14903 intel_fbc_choose_crtc(dev_priv, state);
14904 ret = calc_watermark_data(state);
14905 if (ret)
14906 goto fail;
14907
14908 ret = intel_bw_atomic_check(state);
14909 if (ret)
14910 goto fail;
14911
14912 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14913 new_crtc_state, i) {
14914 if (!needs_modeset(new_crtc_state) &&
14915 !new_crtc_state->update_pipe)
14916 continue;
14917
14918 intel_dump_pipe_config(new_crtc_state, state,
14919 needs_modeset(new_crtc_state) ?
14920 "[modeset]" : "[fastset]");
14921 }
14922
14923 return 0;
14924
14925 fail:
14926 if (ret == -EDEADLK)
14927 return ret;
14928
14929 /*
14930 * FIXME would probably be nice to know which crtc specifically
14931 * caused the failure, in cases where we can pinpoint it.
14932 */
14933 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14934 new_crtc_state, i)
14935 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14936
14937 return ret;
14938 }
14939
14940 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14941 {
14942 return drm_atomic_helper_prepare_planes(state->base.dev,
14943 &state->base);
14944 }
14945
14946 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14947 {
14948 struct drm_device *dev = crtc->base.dev;
14949 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14950
14951 if (!vblank->max_vblank_count)
14952 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14953
14954 return crtc->base.funcs->get_vblank_counter(&crtc->base);
14955 }
14956
14957 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14958 struct intel_crtc_state *crtc_state)
14959 {
14960 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14961
14962 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
14963 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14964
14965 if (crtc_state->has_pch_encoder) {
14966 enum pipe pch_transcoder =
14967 intel_crtc_pch_transcoder(crtc);
14968
14969 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14970 }
14971 }
14972
/*
 * Apply the pipe-level pieces of state that can change during a fastset
 * (no full modeset): pipe source size, panel fitter, linetime watermark
 * and pipe chicken bits.
 *
 * NOTE(review): ordering mirrors the modeset enable path — keep it when
 * touching this function.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK-style pfit: enable, or disable if it was on before */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
15017
/*
 * Commit the pipe-level (mostly double buffered) register state for @crtc.
 *
 * For full modesets the pipe was already programmed when the CRTC was
 * enabled, so only the non-modeset (fastset / color management) pieces
 * are written here. Watermarks are updated in either case.
 *
 * Runs inside the vblank-evasion critical section set up by the caller.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
15050
15051 static void intel_enable_crtc(struct intel_atomic_state *state,
15052 struct intel_crtc *crtc)
15053 {
15054 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15055 const struct intel_crtc_state *new_crtc_state =
15056 intel_atomic_get_new_crtc_state(state, crtc);
15057
15058 if (!needs_modeset(new_crtc_state))
15059 return;
15060
15061 intel_crtc_update_active_timings(new_crtc_state);
15062
15063 dev_priv->display.crtc_enable(state, crtc);
15064
15065 /* vblanks work again, re-enable pipe CRC. */
15066 intel_crtc_enable_pipe_crc(crtc);
15067 }
15068
/*
 * Perform the plane/pipe update for @crtc: pre-plane work, the
 * vblank-evasion critical section (pipe config + plane programming),
 * and post-update FIFO underrun arming for BIOS-inherited configs.
 *
 * Statement order here is load-bearing: everything between
 * intel_pipe_update_start() and intel_pipe_update_end() must complete
 * inside the evasion window.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	if (!modeset) {
		/* Preloadable LUTs can be written before the vblank evasion */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
15118
15119
/*
 * Fully disable @crtc as part of a modeset: planes first, then pipe CRC
 * (before the pipe, to avoid racing vblank-off), then the CRTC itself,
 * FBC and its shared DPLL. Finally reprogram watermarks for pipes that
 * stay off (non-GMCH platforms only).
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
15146
/*
 * Disable every CRTC that needs a full modeset, in dependency order:
 * transcoder port sync slaves and MST slaves first, then everything
 * else. @handled tracks pipes already disabled by the first pass so the
 * second pass skips them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
15191
15192 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15193 {
15194 struct intel_crtc_state *new_crtc_state;
15195 struct intel_crtc *crtc;
15196 int i;
15197
15198 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15199 if (!new_crtc_state->hw.active)
15200 continue;
15201
15202 intel_enable_crtc(state, crtc);
15203 intel_update_crtc(state, crtc);
15204 }
15205 }
15206
15207 static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
15208 {
15209 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15210 u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
15211 u8 required_slices = state->enabled_dbuf_slices_mask;
15212 u8 slices_union = hw_enabled_slices | required_slices;
15213
15214 /* If 2nd DBuf slice required, enable it here */
15215 if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
15216 icl_dbuf_slices_update(dev_priv, slices_union);
15217 }
15218
15219 static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
15220 {
15221 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15222 u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
15223 u8 required_slices = state->enabled_dbuf_slices_mask;
15224
15225 /* If 2nd DBuf slice is no more required disable it */
15226 if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
15227 icl_dbuf_slices_update(dev_priv, required_slices);
15228 }
15229
/*
 * Skl+ enable path: update/enable CRTCs in an order that guarantees the
 * DDB (display buffer) allocation of a pipe never overlaps another
 * pipe's still-committed allocation.
 *
 * Passes:
 *  1. repeatedly update already-active (non-modeset) pipes whose new DDB
 *     doesn't overlap any entry still in @entries;
 *  2. enable modeset pipes with no dependencies;
 *  3. enable the dependent ones (MST slaves, port sync masters);
 *  4. run the plane updates for all newly enabled pipes.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* defer pipes whose new DDB still overlaps someone else's */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* every pipe must have been consumed by one of the passes above */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
15350
/*
 * Drop the final reference on every atomic state queued on the
 * atomic_helper free_list (populated from the sw-fence FENCE_FREE
 * callback, which may run from contexts that can't free directly).
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
15360
15361 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15362 {
15363 struct drm_i915_private *dev_priv =
15364 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15365
15366 intel_atomic_helper_free_state(dev_priv);
15367 }
15368
/*
 * Block until the commit's sw-fence has signalled, while also waking up
 * if a GPU reset that needs the modeset locks (I915_RESET_MODESET) is
 * flagged, so the commit can't deadlock against reset handling.
 *
 * Both wait entries must be prepared before the conditions are tested
 * (classic prepare_to_wait pattern) — do not reorder.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
15395
/*
 * Deferred cleanup after a commit has fully completed: unpin planes,
 * signal cleanup_done, drop the state reference taken for this work,
 * and drain the helper free_list. Runs on system_highpri_wq (queued at
 * the end of intel_atomic_commit_tail()).
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);
	struct drm_i915_private *i915 = to_i915(state->dev);

	drm_atomic_helper_cleanup_planes(&i915->drm, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	intel_atomic_helper_free_state(i915);
}
15408
/*
 * Execute a swapped-in atomic state against the hardware. The sequence
 * (disables -> cdclk -> enables -> flip wait -> optimized watermarks ->
 * post-plane update -> power/wakeref release) is a carefully ordered
 * contract; treat statement order as load-bearing throughout.
 *
 * May run from the caller's context (blocking commits) or from
 * intel_atomic_commit_work() (nonblocking).
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* wait for plane fences, aborting early on pending GPU reset */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* grab power domains before touching any hardware */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Enable all new slices, we might need */
	if (state->modeset)
		icl_dbuf_slice_pre_update(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* load non-preloadable LUTs now that the update has landed */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	/* Disable all slices, we don't need */
	if (state->modeset)
		icl_dbuf_slice_post_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		/*
		 * NOTE(review): filled by crtc->pipe above but consumed by
		 * state index i here — correct only while they coincide;
		 * confirm against upstream.
		 */
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
15574
15575 static void intel_atomic_commit_work(struct work_struct *work)
15576 {
15577 struct intel_atomic_state *state =
15578 container_of(work, struct intel_atomic_state, base.commit_work);
15579
15580 intel_atomic_commit_tail(state);
15581 }
15582
/*
 * i915_sw_fence notify callback for the commit_ready fence.
 *
 * FENCE_COMPLETE needs no action (the commit path does its own blocking
 * waits); FENCE_FREE queues the state on the atomic_helper free_list so
 * it can be released from process context by the free worker.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			/* only the list's first adder schedules the worker */
			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}
15607
15608 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15609 {
15610 struct intel_plane_state *old_plane_state, *new_plane_state;
15611 struct intel_plane *plane;
15612 int i;
15613
15614 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15615 new_plane_state, i)
15616 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15617 to_intel_frontbuffer(new_plane_state->hw.fb),
15618 plane->frontbuffer_bit);
15619 }
15620
15621 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
15622 {
15623 struct intel_crtc *crtc;
15624
15625 for_each_intel_crtc(&dev_priv->drm, crtc)
15626 drm_modeset_lock_assert_held(&crtc->base.mutex);
15627 }
15628
/*
 * Top-level atomic ->atomic_commit() hook: set up the commit sw-fence,
 * prepare planes, swap in the new state and either queue the commit
 * tail on a workqueue (@nonblock) or run it inline.
 *
 * Holds a runtime-pm wakeref from here until the tail releases it.
 * Returns 0 on success or a negative errno before any state was swapped.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* commit the fence so FENCE_FREE can release the state */
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		dev_priv->active_pipes = state->active_pipes;
	}

	/* extra reference consumed by the cleanup work at the end of the tail */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* serialize against any in-flight nonblocking modesets */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15719
/*
 * One-shot vblank waitqueue entry used to boost the GPU clocks (RPS)
 * if the request backing a flip has not started executing by vblank.
 * Allocated in add_rps_boost_after_vblank(), freed in do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	/* crtc whose vblank reference we hold while queued */
	struct drm_crtc *crtc;
	/* request (referenced) that must start before the next vblank */
	struct i915_request *request;
};
15726
15727 static int do_rps_boost(struct wait_queue_entry *_wait,
15728 unsigned mode, int sync, void *key)
15729 {
15730 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15731 struct i915_request *rq = wait->request;
15732
15733 /*
15734 * If we missed the vblank, but the request is already running it
15735 * is reasonable to assume that it will complete before the next
15736 * vblank without our intervention, so leave RPS alone.
15737 */
15738 if (!i915_request_started(rq))
15739 intel_rps_boost(rq);
15740 i915_request_put(rq);
15741
15742 drm_crtc_vblank_put(wait->crtc);
15743
15744 list_del(&wait->wait.entry);
15745 kfree(wait);
15746 return 1;
15747 }
15748
15749 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15750 struct dma_fence *fence)
15751 {
15752 struct wait_rps_boost *wait;
15753
15754 if (!dma_fence_is_i915(fence))
15755 return;
15756
15757 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15758 return;
15759
15760 if (drm_crtc_vblank_get(crtc))
15761 return;
15762
15763 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15764 if (!wait) {
15765 drm_crtc_vblank_put(crtc);
15766 return;
15767 }
15768
15769 wait->request = to_request(dma_fence_get(fence));
15770 wait->crtc = crtc;
15771
15772 wait->wait.func = do_rps_boost;
15773 wait->wait.flags = 0;
15774
15775 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15776 }
15777
15778 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15779 {
15780 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15781 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15782 struct drm_framebuffer *fb = plane_state->hw.fb;
15783 struct i915_vma *vma;
15784
15785 if (plane->id == PLANE_CURSOR &&
15786 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15787 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15788 const int align = intel_cursor_alignment(dev_priv);
15789 int err;
15790
15791 err = i915_gem_object_attach_phys(obj, align);
15792 if (err)
15793 return err;
15794 }
15795
15796 vma = intel_pin_and_fence_fb_obj(fb,
15797 &plane_state->view,
15798 intel_plane_uses_fence(plane_state),
15799 &plane_state->flags);
15800 if (IS_ERR(vma))
15801 return PTR_ERR(vma);
15802
15803 plane_state->vma = vma;
15804
15805 return 0;
15806 }
15807
/*
 * Release the vma pinned by intel_plane_pin_fb(). Safe to call on a
 * state that was never pinned (->vma == NULL).
 */
static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	struct i915_vma *vma;

	/* fetch_and_zero() clears ->vma so the unpin cannot happen twice. */
	vma = fetch_and_zero(&old_plane_state->vma);
	if (vma)
		intel_unpin_fb_vma(vma, old_plane_state->flags);
}
15816
/*
 * Bump the scheduling priority of work rendering into @obj to the
 * display priority, so the flip is not stuck behind lower-priority work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15825
15826 /**
15827 * intel_prepare_plane_fb - Prepare fb for usage on plane
15828 * @_plane: drm plane to prepare for
15829 * @_new_plane_state: the plane state being prepared
15830 *
15831 * Prepares a framebuffer for usage on a display plane. Generally this
15832 * involves pinning the underlying object and updating the frontbuffer tracking
15833 * bits. Some older platforms need special physical address handling for
15834 * cursor planes.
15835 *
15836 * Returns 0 on success, negative error code on failure.
15837 */
15838 int
15839 intel_prepare_plane_fb(struct drm_plane *_plane,
15840 struct drm_plane_state *_new_plane_state)
15841 {
15842 struct intel_plane *plane = to_intel_plane(_plane);
15843 struct intel_plane_state *new_plane_state =
15844 to_intel_plane_state(_new_plane_state);
15845 struct intel_atomic_state *state =
15846 to_intel_atomic_state(new_plane_state->uapi.state);
15847 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15848 const struct intel_plane_state *old_plane_state =
15849 intel_atomic_get_old_plane_state(state, plane);
15850 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
15851 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
15852 int ret;
15853
15854 if (old_obj) {
15855 const struct intel_crtc_state *crtc_state =
15856 intel_atomic_get_new_crtc_state(state,
15857 to_intel_crtc(old_plane_state->hw.crtc));
15858
15859 /* Big Hammer, we also need to ensure that any pending
15860 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15861 * current scanout is retired before unpinning the old
15862 * framebuffer. Note that we rely on userspace rendering
15863 * into the buffer attached to the pipe they are waiting
15864 * on. If not, userspace generates a GPU hang with IPEHR
15865 * point to the MI_WAIT_FOR_EVENT.
15866 *
15867 * This should only fail upon a hung GPU, in which case we
15868 * can safely continue.
15869 */
15870 if (needs_modeset(crtc_state)) {
15871 ret = i915_sw_fence_await_reservation(&state->commit_ready,
15872 old_obj->base.resv, NULL,
15873 false, 0,
15874 GFP_KERNEL);
15875 if (ret < 0)
15876 return ret;
15877 }
15878 }
15879
15880 if (new_plane_state->uapi.fence) { /* explicit fencing */
15881 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
15882 new_plane_state->uapi.fence,
15883 i915_fence_timeout(dev_priv),
15884 GFP_KERNEL);
15885 if (ret < 0)
15886 return ret;
15887 }
15888
15889 if (!obj)
15890 return 0;
15891
15892 ret = i915_gem_object_pin_pages(obj);
15893 if (ret)
15894 return ret;
15895
15896 ret = intel_plane_pin_fb(new_plane_state);
15897
15898 i915_gem_object_unpin_pages(obj);
15899 if (ret)
15900 return ret;
15901
15902 fb_obj_bump_render_priority(obj);
15903 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15904
15905 if (!new_plane_state->uapi.fence) { /* implicit fencing */
15906 struct dma_fence *fence;
15907
15908 ret = i915_sw_fence_await_reservation(&state->commit_ready,
15909 obj->base.resv, NULL,
15910 false,
15911 i915_fence_timeout(dev_priv),
15912 GFP_KERNEL);
15913 if (ret < 0)
15914 goto unpin_fb;
15915
15916 fence = dma_resv_get_excl_rcu(obj->base.resv);
15917 if (fence) {
15918 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15919 fence);
15920 dma_fence_put(fence);
15921 }
15922 } else {
15923 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15924 new_plane_state->uapi.fence);
15925 }
15926
15927 /*
15928 * We declare pageflips to be interactive and so merit a small bias
15929 * towards upclocking to deliver the frame on time. By only changing
15930 * the RPS thresholds to sample more regularly and aim for higher
15931 * clocks we can hopefully deliver low power workloads (like kodi)
15932 * that are not quite steady state without resorting to forcing
15933 * maximum clocks following a vblank miss (see do_rps_boost()).
15934 */
15935 if (!state->rps_interactive) {
15936 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15937 state->rps_interactive = true;
15938 }
15939
15940 return 0;
15941
15942 unpin_fb:
15943 intel_plane_unpin_fb(new_plane_state);
15944
15945 return ret;
15946 }
15947
15948 /**
15949 * intel_cleanup_plane_fb - Cleans up an fb after plane use
15950 * @plane: drm plane to clean up for
15951 * @_old_plane_state: the state from the previous modeset
15952 *
15953 * Cleans up a framebuffer that has just been removed from a plane.
15954 */
15955 void
15956 intel_cleanup_plane_fb(struct drm_plane *plane,
15957 struct drm_plane_state *_old_plane_state)
15958 {
15959 struct intel_plane_state *old_plane_state =
15960 to_intel_plane_state(_old_plane_state);
15961 struct intel_atomic_state *state =
15962 to_intel_atomic_state(old_plane_state->uapi.state);
15963 struct drm_i915_private *dev_priv = to_i915(plane->dev);
15964 struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
15965
15966 if (!obj)
15967 return;
15968
15969 if (state->rps_interactive) {
15970 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15971 state->rps_interactive = false;
15972 }
15973
15974 /* Should only be called after a successful intel_prepare_plane_fb()! */
15975 intel_plane_unpin_fb(old_plane_state);
15976 }
15977
15978 /**
15979 * intel_plane_destroy - destroy a plane
15980 * @plane: plane to destroy
15981 *
15982 * Common destruction function for all types of planes (primary, cursor,
15983 * sprite).
15984 */
15985 void intel_plane_destroy(struct drm_plane *plane)
15986 {
15987 drm_plane_cleanup(plane);
15988 kfree(to_intel_plane(plane));
15989 }
15990
15991 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15992 u32 format, u64 modifier)
15993 {
15994 switch (modifier) {
15995 case DRM_FORMAT_MOD_LINEAR:
15996 case I915_FORMAT_MOD_X_TILED:
15997 break;
15998 default:
15999 return false;
16000 }
16001
16002 switch (format) {
16003 case DRM_FORMAT_C8:
16004 case DRM_FORMAT_RGB565:
16005 case DRM_FORMAT_XRGB1555:
16006 case DRM_FORMAT_XRGB8888:
16007 return modifier == DRM_FORMAT_MOD_LINEAR ||
16008 modifier == I915_FORMAT_MOD_X_TILED;
16009 default:
16010 return false;
16011 }
16012 }
16013
16014 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
16015 u32 format, u64 modifier)
16016 {
16017 switch (modifier) {
16018 case DRM_FORMAT_MOD_LINEAR:
16019 case I915_FORMAT_MOD_X_TILED:
16020 break;
16021 default:
16022 return false;
16023 }
16024
16025 switch (format) {
16026 case DRM_FORMAT_C8:
16027 case DRM_FORMAT_RGB565:
16028 case DRM_FORMAT_XRGB8888:
16029 case DRM_FORMAT_XBGR8888:
16030 case DRM_FORMAT_ARGB8888:
16031 case DRM_FORMAT_ABGR8888:
16032 case DRM_FORMAT_XRGB2101010:
16033 case DRM_FORMAT_XBGR2101010:
16034 case DRM_FORMAT_ARGB2101010:
16035 case DRM_FORMAT_ABGR2101010:
16036 case DRM_FORMAT_XBGR16161616F:
16037 return modifier == DRM_FORMAT_MOD_LINEAR ||
16038 modifier == I915_FORMAT_MOD_X_TILED;
16039 default:
16040 return false;
16041 }
16042 }
16043
16044 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16045 u32 format, u64 modifier)
16046 {
16047 return modifier == DRM_FORMAT_MOD_LINEAR &&
16048 format == DRM_FORMAT_ARGB8888;
16049 }
16050
/* Plane funcs used for gen4+ primary planes (see intel_primary_plane_create()). */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
16059
/* Plane funcs used for gen2/3 primary planes (see intel_primary_plane_create()). */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
16068
/*
 * Legacy cursor ioctl fastpath: update only the cursor plane without a
 * full atomic commit. Any condition that could affect more than the
 * fb/position (inactive crtc, pending modeset, size change, ...) falls
 * back to the slowpath, i.e. a regular atomic plane update.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	/* Scratch crtc state used only for the check below; never swapped in. */
	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	/* Validate the new state as a normal atomic update would. */
	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	/* On failure the new state is discarded; on success the old one is. */
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
16193
/* Cursor plane funcs: update_plane goes through the legacy fastpath. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
16202
16203 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
16204 enum i9xx_plane_id i9xx_plane)
16205 {
16206 if (!HAS_FBC(dev_priv))
16207 return false;
16208
16209 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
16210 return i9xx_plane == PLANE_A; /* tied to pipe A */
16211 else if (IS_IVYBRIDGE(dev_priv))
16212 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
16213 i9xx_plane == PLANE_C;
16214 else if (INTEL_GEN(dev_priv) >= 4)
16215 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
16216 else
16217 return i9xx_plane == PLANE_A;
16218 }
16219
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * format list, vfuncs and properties appropriate for the platform.
 * Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	/* skl+ primary planes are handled by the universal plane code. */
	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the pixel format list for this platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 * output on each color channel has one quarter amplitude.
		 * It can be brought up to full amplitude by using pipe
		 * gamma correction or pipe color space conversion to
		 * multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	/* gen5+/g4x name the plane after the pipe, older parts after the plane. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* The primary plane sits at the bottom of the (immutable) zpos stack. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
16352
/*
 * Allocate and register the cursor plane for @pipe, selecting the
 * i845/i865 or i9xx-style cursor vfuncs depending on the platform.
 * Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* i845/i865 have their own cursor hw; everything else is i9xx-style. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * NOTE(review): ~0 appears to be an "unknown" sentinel for the
	 * cached cursor register values — confirm against the cursor
	 * update/disable implementations.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* The cursor goes above the primary and all the sprites. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
16417
/*
 * drm_crtc_funcs hooks common to every platform; the per-platform
 * tables add the matching vblank counter/enable/disable hooks on top.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
16428
/* Non-GMCH, gen8+ (selection in intel_crtc_init()). */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Non-GMCH, pre-gen8 (selection in intel_crtc_init()). */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH: CHV, VLV and G4X. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH gen4. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* i945GM/i915GM only. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Remaining gen3 parts. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* gen2. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
16491
16492 static struct intel_crtc *intel_crtc_alloc(void)
16493 {
16494 struct intel_crtc_state *crtc_state;
16495 struct intel_crtc *crtc;
16496
16497 crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
16498 if (!crtc)
16499 return ERR_PTR(-ENOMEM);
16500
16501 crtc_state = intel_crtc_state_alloc(crtc);
16502 if (!crtc_state) {
16503 kfree(crtc);
16504 return ERR_PTR(-ENOMEM);
16505 }
16506
16507 crtc->base.state = &crtc_state->uapi;
16508 crtc->config = crtc_state;
16509
16510 return crtc;
16511 }
16512
/* Free a crtc from intel_crtc_alloc(), including its attached state. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
16518
16519 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
16520 {
16521 struct intel_plane *plane;
16522
16523 for_each_intel_plane(&dev_priv->drm, plane) {
16524 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
16525 plane->pipe);
16526
16527 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
16528 }
16529 }
16530
/*
 * Create and register the crtc for @pipe along with its primary,
 * sprite and cursor planes. Returns 0 or a negative error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	/*
	 * NOTE(review): planes created below are not torn down on the fail
	 * path here — presumably cleaned up by the caller via the drm mode
	 * config teardown; confirm.
	 */
	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Select the per-platform vblank hooks. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe maps to exactly one crtc, registered exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16620
16621 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16622 struct drm_file *file)
16623 {
16624 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16625 struct drm_crtc *drmmode_crtc;
16626 struct intel_crtc *crtc;
16627
16628 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16629 if (!drmmode_crtc)
16630 return -ENOENT;
16631
16632 crtc = to_intel_crtc(drmmode_crtc);
16633 pipe_from_crtc_id->pipe = crtc->pipe;
16634
16635 return 0;
16636 }
16637
16638 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16639 {
16640 struct drm_device *dev = encoder->base.dev;
16641 struct intel_encoder *source_encoder;
16642 u32 possible_clones = 0;
16643
16644 for_each_intel_encoder(dev, source_encoder) {
16645 if (encoders_cloneable(encoder, source_encoder))
16646 possible_clones |= drm_encoder_mask(&source_encoder->base);
16647 }
16648
16649 return possible_clones;
16650 }
16651
16652 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16653 {
16654 struct drm_device *dev = encoder->base.dev;
16655 struct intel_crtc *crtc;
16656 u32 possible_crtcs = 0;
16657
16658 for_each_intel_crtc(dev, crtc) {
16659 if (encoder->pipe_mask & BIT(crtc->pipe))
16660 possible_crtcs |= drm_crtc_mask(&crtc->base);
16661 }
16662
16663 return possible_crtcs;
16664 }
16665
16666 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16667 {
16668 if (!IS_MOBILE(dev_priv))
16669 return false;
16670
16671 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16672 return false;
16673
16674 if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16675 return false;
16676
16677 return true;
16678 }
16679
/* Whether an analog CRT output can be present on this DDI platform. */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	/* No CRT on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* No CRT on HSW/BDW ULT. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	/* On LPT-H the CRT may be disabled via a fuse strap. */
	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* VBT must also report integrated CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
16701
16702 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16703 {
16704 int pps_num;
16705 int pps_idx;
16706
16707 if (HAS_DDI(dev_priv))
16708 return;
16709 /*
16710 * This w/a is needed at least on CPT/PPT, but to be sure apply it
16711 * everywhere where registers can be write protected.
16712 */
16713 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16714 pps_num = 2;
16715 else
16716 pps_num = 1;
16717
16718 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16719 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16720
16721 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16722 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16723 }
16724 }
16725
16726 static void intel_pps_init(struct drm_i915_private *dev_priv)
16727 {
16728 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
16729 dev_priv->pps_mmio_base = PCH_PPS_BASE;
16730 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16731 dev_priv->pps_mmio_base = VLV_PPS_BASE;
16732 else
16733 dev_priv->pps_mmio_base = PPS_BASE;
16734
16735 intel_pps_unlock_regs_wa(dev_priv);
16736 }
16737
/*
 * Probe and register all display outputs for this platform: DDI ports,
 * DP/eDP, HDMI, LVDS, CRT, DSI, SDVO, TV and DVO encoders as applicable.
 * Port presence is decided per platform generation from hardware strap
 * registers and/or the VBT; see the per-branch comments. Afterwards PSR
 * is initialized, each encoder's possible_crtcs/possible_clones masks
 * are filled in, the PCH reference clock is configured, and panel
 * connectors are moved to the head of the connector list.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/* Nothing to register without (enabled) display hardware. */
	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* NOTE(review): PORT_C is absent here — presumably intentional
		 * for gen12; confirm against the platform port mapping. */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D HDMI is skipped when the port was claimed as eDP above. */
		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* All encoders are registered now; derive their crtc/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16978
16979 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16980 {
16981 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16982
16983 drm_framebuffer_cleanup(fb);
16984 intel_frontbuffer_put(intel_fb->frontbuffer);
16985
16986 kfree(intel_fb);
16987 }
16988
16989 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16990 struct drm_file *file,
16991 unsigned int *handle)
16992 {
16993 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16994 struct drm_i915_private *i915 = to_i915(obj->base.dev);
16995
16996 if (obj->userptr.mm) {
16997 drm_dbg(&i915->drm,
16998 "attempting to use a userptr for a framebuffer, denied\n");
16999 return -EINVAL;
17000 }
17001
17002 return drm_gem_handle_create(file, &obj->base, handle);
17003 }
17004
17005 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
17006 struct drm_file *file,
17007 unsigned flags, unsigned color,
17008 struct drm_clip_rect *clips,
17009 unsigned num_clips)
17010 {
17011 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17012
17013 i915_gem_object_flush_if_display(obj);
17014 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
17015
17016 return 0;
17017 }
17018
/* Framebuffer vfuncs used for all userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
17024
/*
 * Validate @mode_cmd against the hardware's tiling/format/stride
 * constraints and against @obj's tiling state, then initialize
 * @intel_fb around the object.
 *
 * Takes a frontbuffer reference on @obj which is dropped again on every
 * error path. Returns 0 on success or a negative error code.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL; /* default error for all "goto err" paths below */
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's tiling mode and stride under its lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks; every plane must reference handle 0's object. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
17170
17171 static struct drm_framebuffer *
17172 intel_user_framebuffer_create(struct drm_device *dev,
17173 struct drm_file *filp,
17174 const struct drm_mode_fb_cmd2 *user_mode_cmd)
17175 {
17176 struct drm_framebuffer *fb;
17177 struct drm_i915_gem_object *obj;
17178 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
17179
17180 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
17181 if (!obj)
17182 return ERR_PTR(-ENOENT);
17183
17184 fb = intel_framebuffer_create(obj, &mode_cmd);
17185 i915_gem_object_put(obj);
17186
17187 return fb;
17188 }
17189
17190 static enum drm_mode_status
17191 intel_mode_valid(struct drm_device *dev,
17192 const struct drm_display_mode *mode)
17193 {
17194 struct drm_i915_private *dev_priv = to_i915(dev);
17195 int hdisplay_max, htotal_max;
17196 int vdisplay_max, vtotal_max;
17197
17198 /*
17199 * Can't reject DBLSCAN here because Xorg ddxen can add piles
17200 * of DBLSCAN modes to the output's mode list when they detect
17201 * the scaling mode property on the connector. And they don't
17202 * ask the kernel to validate those modes in any way until
17203 * modeset time at which point the client gets a protocol error.
17204 * So in order to not upset those clients we silently ignore the
17205 * DBLSCAN flag on such connectors. For other connectors we will
17206 * reject modes with the DBLSCAN flag in encoder->compute_config().
17207 * And we always reject DBLSCAN modes in connector->mode_valid()
17208 * as we never want such modes on the connector's mode list.
17209 */
17210
17211 if (mode->vscan > 1)
17212 return MODE_NO_VSCAN;
17213
17214 if (mode->flags & DRM_MODE_FLAG_HSKEW)
17215 return MODE_H_ILLEGAL;
17216
17217 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
17218 DRM_MODE_FLAG_NCSYNC |
17219 DRM_MODE_FLAG_PCSYNC))
17220 return MODE_HSYNC;
17221
17222 if (mode->flags & (DRM_MODE_FLAG_BCAST |
17223 DRM_MODE_FLAG_PIXMUX |
17224 DRM_MODE_FLAG_CLKDIV2))
17225 return MODE_BAD;
17226
17227 /* Transcoder timing limits */
17228 if (INTEL_GEN(dev_priv) >= 11) {
17229 hdisplay_max = 16384;
17230 vdisplay_max = 8192;
17231 htotal_max = 16384;
17232 vtotal_max = 8192;
17233 } else if (INTEL_GEN(dev_priv) >= 9 ||
17234 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17235 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
17236 vdisplay_max = 4096;
17237 htotal_max = 8192;
17238 vtotal_max = 8192;
17239 } else if (INTEL_GEN(dev_priv) >= 3) {
17240 hdisplay_max = 4096;
17241 vdisplay_max = 4096;
17242 htotal_max = 8192;
17243 vtotal_max = 8192;
17244 } else {
17245 hdisplay_max = 2048;
17246 vdisplay_max = 2048;
17247 htotal_max = 4096;
17248 vtotal_max = 4096;
17249 }
17250
17251 if (mode->hdisplay > hdisplay_max ||
17252 mode->hsync_start > htotal_max ||
17253 mode->hsync_end > htotal_max ||
17254 mode->htotal > htotal_max)
17255 return MODE_H_ILLEGAL;
17256
17257 if (mode->vdisplay > vdisplay_max ||
17258 mode->vsync_start > vtotal_max ||
17259 mode->vsync_end > vtotal_max ||
17260 mode->vtotal > vtotal_max)
17261 return MODE_V_ILLEGAL;
17262
17263 if (INTEL_GEN(dev_priv) >= 5) {
17264 if (mode->hdisplay < 64 ||
17265 mode->htotal - mode->hdisplay < 32)
17266 return MODE_H_ILLEGAL;
17267
17268 if (mode->vtotal - mode->vdisplay < 5)
17269 return MODE_V_ILLEGAL;
17270 } else {
17271 if (mode->htotal - mode->hdisplay < 32)
17272 return MODE_H_ILLEGAL;
17273
17274 if (mode->vtotal - mode->vdisplay < 3)
17275 return MODE_V_ILLEGAL;
17276 }
17277
17278 return MODE_OK;
17279 }
17280
17281 enum drm_mode_status
17282 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
17283 const struct drm_display_mode *mode)
17284 {
17285 int plane_width_max, plane_height_max;
17286
17287 /*
17288 * intel_mode_valid() should be
17289 * sufficient on older platforms.
17290 */
17291 if (INTEL_GEN(dev_priv) < 9)
17292 return MODE_OK;
17293
17294 /*
17295 * Most people will probably want a fullscreen
17296 * plane so let's not advertize modes that are
17297 * too big for that.
17298 */
17299 if (INTEL_GEN(dev_priv) >= 11) {
17300 plane_width_max = 5120;
17301 plane_height_max = 4320;
17302 } else {
17303 plane_width_max = 5120;
17304 plane_height_max = 4096;
17305 }
17306
17307 if (mode->hdisplay > plane_width_max)
17308 return MODE_H_ILLEGAL;
17309
17310 if (mode->vdisplay > plane_height_max)
17311 return MODE_V_ILLEGAL;
17312
17313 return MODE_OK;
17314 }
17315
/* Mode config vfuncs: fb creation, mode validation and the atomic entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
17327
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects per-platform implementations for pipe config readout, initial
 * plane config readout, clock computation, crtc enable/disable, FDI link
 * training and the modeset-enable commit hook.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training hooks, only set on platforms with FDI (ilk-ivb). */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
17418
/*
 * Read the current cdclk configuration out of the hardware and seed the
 * cdclk state object with it.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	/* Both logical and actual state start out as the hw readout. */
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}
17428
17429 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
17430 {
17431 struct drm_plane *plane;
17432 struct drm_crtc *crtc;
17433
17434 drm_for_each_crtc(crtc, state->dev) {
17435 struct drm_crtc_state *crtc_state;
17436
17437 crtc_state = drm_atomic_get_crtc_state(state, crtc);
17438 if (IS_ERR(crtc_state))
17439 return PTR_ERR(crtc_state);
17440 }
17441
17442 drm_for_each_plane(plane, state->dev) {
17443 struct drm_plane_state *plane_state;
17444
17445 plane_state = drm_atomic_get_plane_state(state, plane);
17446 if (IS_ERR(plane_state))
17447 return PTR_ERR(plane_state);
17448 }
17449
17450 return 0;
17451 }
17452
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:	/* re-entered after a -EDEADLK backoff below */
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Lock contention: clear the state, back off and try again. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
17538
17539 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
17540 {
17541 if (IS_GEN(dev_priv, 5)) {
17542 u32 fdi_pll_clk =
17543 intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
17544
17545 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
17546 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
17547 dev_priv->fdi_pll_freq = 270000;
17548 } else {
17549 return;
17550 }
17551
17552 drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
17553 }
17554
/*
 * Commit an atomic state built from the current (inherited) CRTC state,
 * forcing a LUT update on every active crtc and a full modeset when DSC
 * is enabled — see the FIXME comments below for why. Returns 0 on
 * success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:	/* re-entered after a -EDEADLK backoff below */
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Lock contention: clear the state, back off and try again. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
17629
17630 static void intel_mode_config_init(struct drm_i915_private *i915)
17631 {
17632 struct drm_mode_config *mode_config = &i915->drm.mode_config;
17633
17634 drm_mode_config_init(&i915->drm);
17635 INIT_LIST_HEAD(&i915->global_obj_list);
17636
17637 mode_config->min_width = 0;
17638 mode_config->min_height = 0;
17639
17640 mode_config->preferred_depth = 24;
17641 mode_config->prefer_shadow = 1;
17642
17643 mode_config->allow_fb_modifiers = true;
17644
17645 mode_config->funcs = &intel_mode_funcs;
17646
17647 /*
17648 * Maximum framebuffer dimensions, chosen to match
17649 * the maximum render engine surface size on gen4+.
17650 */
17651 if (INTEL_GEN(i915) >= 7) {
17652 mode_config->max_width = 16384;
17653 mode_config->max_height = 16384;
17654 } else if (INTEL_GEN(i915) >= 4) {
17655 mode_config->max_width = 8192;
17656 mode_config->max_height = 8192;
17657 } else if (IS_GEN(i915, 3)) {
17658 mode_config->max_width = 4096;
17659 mode_config->max_height = 4096;
17660 } else {
17661 mode_config->max_width = 2048;
17662 mode_config->max_height = 2048;
17663 }
17664
17665 if (IS_I845G(i915) || IS_I865G(i915)) {
17666 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17667 mode_config->cursor_height = 1023;
17668 } else if (IS_GEN(i915, 2)) {
17669 mode_config->cursor_width = 64;
17670 mode_config->cursor_height = 64;
17671 } else {
17672 mode_config->cursor_width = 256;
17673 mode_config->cursor_height = 256;
17674 }
17675 }
17676
/*
 * Inverse of intel_mode_config_init(): tear down the global atomic object
 * state first, then the drm mode config itself.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
17682
17683 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
17684 {
17685 if (plane_config->fb) {
17686 struct drm_framebuffer *fb = &plane_config->fb->base;
17687
17688 /* We may only have the stub and not a full framebuffer */
17689 if (drm_framebuffer_read_refcount(fb))
17690 drm_framebuffer_put(fb);
17691 else
17692 kfree(fb);
17693 }
17694
17695 if (plane_config->vma)
17696 i915_vma_put(plane_config->vma);
17697 }
17698
17699 /* part #1: call before irq install */
17700 int intel_modeset_init_noirq(struct drm_i915_private *i915)
17701 {
17702 int ret;
17703
17704 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17705 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17706 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17707
17708 intel_mode_config_init(i915);
17709
17710 ret = intel_cdclk_init(i915);
17711 if (ret)
17712 return ret;
17713
17714 ret = intel_bw_init(i915);
17715 if (ret)
17716 return ret;
17717
17718 init_llist_head(&i915->atomic_helper.free_list);
17719 INIT_WORK(&i915->atomic_helper.free_work,
17720 intel_atomic_helper_free_state_worker);
17721
17722 intel_init_quirks(i915);
17723
17724 intel_fbc_init(i915);
17725
17726 return 0;
17727 }
17728
/*
 * part #2: call after irq install
 *
 * Completes display initialization: PM/watermark setup, CRTC creation,
 * output probing, hardware state takeover from the BIOS, and an initial
 * atomic commit. The ordering of these steps is significant (e.g. hw
 * state readout needs outputs to exist, watermark sanitization needs the
 * BIOS fb reconstructed first).
 *
 * Returns 0 on success, or a negative errno if CRTC creation fails.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create a CRTC per pipe, but only if the display is usable at all */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Take over whatever state the BIOS left the hardware in */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");

	/* A failed initial commit is deliberately not fatal to the probe */
	return 0;
}
17821
/*
 * Force-enable @pipe with a fixed 640x480@60Hz timing, for the i830
 * "force pipe on" quirk. Programs the DPLL dividers, pipe timings and
 * PIPECONF directly; the register write ordering below is deliberate
 * and must not be changed.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: these dividers must produce ~25.2 MHz from the 48 MHz ref */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Fixed 640x480 timings (active | total-1, packed low/high halves) */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out */
	intel_wait_for_pipe_scanline_moving(crtc);
}
17894
/*
 * Disable a pipe that was force-enabled by i830_enable_pipe(). All planes
 * and cursors are expected to be off already (WARNed on below); the pipe
 * is shut down first, then the DPLL is parked with only VGA-mode-disable
 * set.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* Nothing should still be scanning out of this pipe */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Pipe must stop scanning before the PLL can be turned off */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
17924
17925 static void
17926 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17927 {
17928 struct intel_crtc *crtc;
17929
17930 if (INTEL_GEN(dev_priv) >= 4)
17931 return;
17932
17933 for_each_intel_crtc(&dev_priv->drm, crtc) {
17934 struct intel_plane *plane =
17935 to_intel_plane(crtc->base.primary);
17936 struct intel_crtc *plane_crtc;
17937 enum pipe pipe;
17938
17939 if (!plane->get_hw_state(plane, &pipe))
17940 continue;
17941
17942 if (pipe == crtc->pipe)
17943 continue;
17944
17945 drm_dbg_kms(&dev_priv->drm,
17946 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17947 plane->base.base.id, plane->base.name);
17948
17949 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17950 intel_plane_disable_noatomic(plane_crtc, plane);
17951 }
17952 }
17953
17954 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17955 {
17956 struct drm_device *dev = crtc->base.dev;
17957 struct intel_encoder *encoder;
17958
17959 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17960 return true;
17961
17962 return false;
17963 }
17964
17965 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17966 {
17967 struct drm_device *dev = encoder->base.dev;
17968 struct intel_connector *connector;
17969
17970 for_each_connector_on_encoder(dev, &encoder->base, connector)
17971 return connector;
17972
17973 return NULL;
17974 }
17975
17976 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17977 enum pipe pch_transcoder)
17978 {
17979 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17980 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17981 }
17982
/*
 * Reset any non-zero frame start delay (a debug knob the BIOS may have
 * left programmed) back to 0, on both the CPU transcoder and, if present,
 * the PCH transcoder. The register holding the field differs per platform.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: field lives in the per-transcoder CHICKEN_TRANS register */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms: field lives in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}
18033
/*
 * Bring a CRTC's hardware state into agreement with what the driver can
 * actually handle after BIOS takeover: drop debug frame start delays,
 * kill non-primary planes, clear any BIOS background color, disable the
 * pipe entirely if no encoder is attached, and initialize the FIFO
 * underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
18099
18100 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
18101 {
18102 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
18103
18104 /*
18105 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
18106 * the hardware when a high res displays plugged in. DPLL P
18107 * divider is zero, and the pipe timings are bonkers. We'll
18108 * try to disable everything in that case.
18109 *
18110 * FIXME would be nice to be able to sanitize this state
18111 * without several WARNs, but for now let's take the easy
18112 * road.
18113 */
18114 return IS_GEN(dev_priv, 6) &&
18115 crtc_state->hw.active &&
18116 crtc_state->shared_dpll &&
18117 crtc_state->port_clock == 0;
18118 }
18119
/*
 * Sanitize a single encoder after hardware state readout: if a connector
 * claims the encoder but the encoder has no usable active pipe (or the
 * pipe state is bogus, see has_bogus_dpll_config()), manually run the
 * encoder's disable hooks and clamp the connector/encoder links to off.
 * Finally notify opregion and, on gen11+, fix up the PLL<->port mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore whatever best_encoder held before the hack above */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
18190
18191 /* FIXME read out full plane state for all planes */
18192 static void readout_plane_state(struct drm_i915_private *dev_priv)
18193 {
18194 struct intel_plane *plane;
18195 struct intel_crtc *crtc;
18196
18197 for_each_intel_plane(&dev_priv->drm, plane) {
18198 struct intel_plane_state *plane_state =
18199 to_intel_plane_state(plane->base.state);
18200 struct intel_crtc_state *crtc_state;
18201 enum pipe pipe = PIPE_A;
18202 bool visible;
18203
18204 visible = plane->get_hw_state(plane, &pipe);
18205
18206 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18207 crtc_state = to_intel_crtc_state(crtc->base.state);
18208
18209 intel_set_plane_visible(crtc_state, plane_state, visible);
18210
18211 drm_dbg_kms(&dev_priv->drm,
18212 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
18213 plane->base.base.id, plane->base.name,
18214 enableddisabled(visible), pipe_name(pipe));
18215 }
18216
18217 for_each_intel_crtc(&dev_priv->drm, crtc) {
18218 struct intel_crtc_state *crtc_state =
18219 to_intel_crtc_state(crtc->base.state);
18220
18221 fixup_active_planes(crtc_state);
18222 }
18223 }
18224
/*
 * Read the current hardware modeset state (pipes, planes, DPLLs, encoders,
 * connectors) into the software state objects, and derive the dependent
 * state (modes, pixel rate, min cdclk/voltage, bandwidth) from it. This is
 * the takeover point from whatever the BIOS programmed; the readout order
 * below matters (crtcs before planes before encoders before connectors).
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Reset each crtc's state and read back whether the pipe is running */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	/* Link each active encoder to its crtc and read its config */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	/* Link connectors to their encoders and fill in the crtc masks */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive modes, pixel rate, cdclk/voltage floors and bandwidth */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
18405
18406 static void
18407 get_encoder_power_domains(struct drm_i915_private *dev_priv)
18408 {
18409 struct intel_encoder *encoder;
18410
18411 for_each_intel_encoder(&dev_priv->drm, encoder) {
18412 struct intel_crtc_state *crtc_state;
18413
18414 if (!encoder->get_power_domains)
18415 continue;
18416
18417 /*
18418 * MST-primary and inactive encoders don't have a crtc state
18419 * and neither of these require any power domain references.
18420 */
18421 if (!encoder->base.crtc)
18422 continue;
18423
18424 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
18425 encoder->get_power_domains(encoder, crtc_state);
18426 }
18427 }
18428
18429 static void intel_early_display_was(struct drm_i915_private *dev_priv)
18430 {
18431 /*
18432 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
18433 * Also known as Wa_14010480278.
18434 */
18435 if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
18436 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
18437 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
18438
18439 if (IS_HASWELL(dev_priv)) {
18440 /*
18441 * WaRsPkgCStateDisplayPMReq:hsw
18442 * System hang if this isn't done before disabling all planes!
18443 */
18444 intel_de_write(dev_priv, CHICKEN_PAR1_1,
18445 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
18446 }
18447 }
18448
18449 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
18450 enum port port, i915_reg_t hdmi_reg)
18451 {
18452 u32 val = intel_de_read(dev_priv, hdmi_reg);
18453
18454 if (val & SDVO_ENABLE ||
18455 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
18456 return;
18457
18458 drm_dbg_kms(&dev_priv->drm,
18459 "Sanitizing transcoder select for HDMI %c\n",
18460 port_name(port));
18461
18462 val &= ~SDVO_PIPE_SEL_MASK;
18463 val |= SDVO_PIPE_SEL(PIPE_A);
18464
18465 intel_de_write(dev_priv, hdmi_reg, val);
18466 }
18467
18468 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18469 enum port port, i915_reg_t dp_reg)
18470 {
18471 u32 val = intel_de_read(dev_priv, dp_reg);
18472
18473 if (val & DP_PORT_EN ||
18474 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18475 return;
18476
18477 drm_dbg_kms(&dev_priv->drm,
18478 "Sanitizing transcoder select for DP %c\n",
18479 port_name(port));
18480
18481 val &= ~DP_PIPE_SEL_MASK;
18482 val |= DP_PIPE_SEL(PIPE_A);
18483
18484 intel_de_write(dev_priv, dp_reg, val);
18485 }
18486
/* Sanitize the transcoder select bits on every PCH DP and HDMI port. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
18509
/*
 * Scan out the current hw modeset state and sanitize it to something the
 * driver can work with: read everything back, then fix up TypeC ports,
 * PCH transcoder selects, plane mappings, encoders, crtcs, DPLLs and
 * watermarks in that order. Must be called with all modeset locks held
 * (via @ctx).
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the whole display powered for the duration of the takeover */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Platform-specific watermark state readout (and sanitize where needed) */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* No crtc should still be holding power domains at this point */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18599
/*
 * Restore the atomic display state saved in dev_priv->modeset_restore_state
 * (if any), taking all modeset locks and retrying on -EDEADLK. Ownership of
 * the saved state is taken here: the pointer is cleared up front and the
 * state is put before returning.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard w/w lock dance: back off and retry on deadlock */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
18634
18635 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
18636 {
18637 struct intel_connector *connector;
18638 struct drm_connector_list_iter conn_iter;
18639
18640 /* Kill all the work that may have been queued by hpd. */
18641 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
18642 for_each_intel_connector_iter(connector, &conn_iter) {
18643 if (connector->modeset_retry_work.func)
18644 cancel_work_sync(&connector->modeset_retry_work);
18645 if (connector->hdcp.shim) {
18646 cancel_delayed_work_sync(&connector->hdcp.check_work);
18647 cancel_work_sync(&connector->hdcp.prop_work);
18648 }
18649 }
18650 drm_connector_list_iter_end(&conn_iter);
18651 }
18652
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/*
	 * Drain any flips/modesets still in flight while interrupts are
	 * still available to complete them.
	 */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/*
	 * Run any pending deferred frees; afterwards the free list must be
	 * empty, since nothing may queue to it past this point.
	 */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
18662
/*
 * part #2: call after irq uninstall
 *
 * NOTE(review): the ordering of the calls below is deliberate — several
 * of the inline comments document explicit before/after dependencies.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/*
	 * Both workqueues were already flushed in part #1
	 * (intel_modeset_driver_remove), so it is safe to destroy them now.
	 */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18702
18703 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18704
/*
 * Snapshot of display-controller register state, filled in by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state(). All fields are zero-initialized, so
 * registers skipped at capture time (powered-down domains, wrong gen)
 * simply read back as 0.
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2; only captured on HSW/BDW. */
	u32 power_well_driver;

	/* Cursor plane registers, one entry per pipe. */
	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* never written by the capture code in this file */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false if the pipe's power domain was off at capture time */
		bool power_domain_on;
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT; only captured on GMCH platforms */
	} pipe[I915_MAX_PIPES];

	/* Primary plane registers, one entry per pipe. */
	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE; gen <= 3 only */
		u32 pos;	/* DSPPOS; gen <= 3 only */
		u32 addr;	/* DSPADDR; gen <= 7, not HSW */
		u32 surface;	/* DSPSURF; gen >= 4 */
		u32 tile_offset;	/* DSPTILEOFF; gen >= 4 */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* true if the platform has this transcoder at all */
		bool available;
		/* false if the transcoder's power domain was off at capture */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		/* Transcoder timing registers. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];	/* size checked against transcoders[] via BUILD_BUG_ON */
};
18747
/*
 * Capture a snapshot of display register state for the GPU error state.
 *
 * Returns a kzalloc'ed snapshot that the caller must free, or NULL if
 * the device has no (enabled) display or the allocation fails. Uses
 * GFP_ATOMIC, so the allocation itself is safe from atomic context.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* The snapshot's transcoder[] array must cover every entry above. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	/* Zeroed, so skipped registers below report as 0 when printed. */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip all register reads for pipes whose power domain is off. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		/* Gen-dependent plane registers: only read what exists. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		/* Transcoder not present on this platform at all. */
		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		/* As for pipes: don't read registers in a powered-down domain. */
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}
18843
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Dump a display error state snapshot (from
 * intel_display_capture_error_state()) into the error state buffer.
 * A NULL snapshot is silently ignored. Registers that were skipped at
 * capture time print as 0, since the snapshot was zero-initialized.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Same gen gating as the capture side: print only what was read. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Transcoder didn't exist on this platform; nothing captured. */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
18904
18905 #endif